// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nkerns.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"

extern "C" void ExcFault(TAny*);

/******************************************************************************
 * Fast mutex
 ******************************************************************************/
/** Create a fast mutex

    @publishedPartner
    @released
*/
EXPORT_C NFastMutex::NFastMutex()
    : iHoldingThread(0), iMutexLock(TSpinLock::EOrderFastMutex)
    {
    }

/******************************************************************************
 * NSchedulable
 ******************************************************************************/
NSchedulable::NSchedulable()
    : iSSpinLock(TSpinLock::EOrderThread)
    {
    iPriority = 0;
    iReady = 0;
    iCurrent = 0;
    iLastCpu = 0;
    iNSchedulableSpare1 = 0;
    iPauseCount = 0;
    iSuspended = 0;
    iNSchedulableSpare2 = 0;
    iCpuChange = 0;
    iStopping = 0;
    iFreezeCpu = 0;
    iParent = (NSchedulable*)0xdeadbeef;
    iCpuAffinity = 0;
    new (i_IDfcMem) TDfc(&DeferredReadyIDfcFn, this);
    iEventState = 0;
    iTotalCpuTime64 = 0;
    }

/******************************************************************************
 * NThreadGroup
 ******************************************************************************/
NThreadGroup::NThreadGroup()
    {
    iParent = 0;
    iThreadCount = 0;
    new (&iSSpinLock) TSpinLock(TSpinLock::EOrderThreadGroup);
    }

/** Create a thread group

    @publishedPartner
    @prototype
*/
EXPORT_C TInt NKern::GroupCreate(NThreadGroup* aGroup, SNThreadGroupCreateInfo& aInfo)
    {
    new (aGroup) NThreadGroup();
    aGroup->iCpuAffinity = aInfo.iCpuAffinity;
    return KErrNone;
    }


/** Destroy a thread group

    @pre Call in thread context, interrupts enabled, preemption enabled
    @pre No fast mutex held
    @pre Calling thread in critical section
    @pre All threads have left the group

    @publishedPartner
    @prototype
*/
EXPORT_C void NKern::GroupDestroy(NThreadGroup* aGroup)
    {
    NKern::ThreadEnterCS();
    aGroup->DetachTiedEvents();
    NKern::ThreadLeaveCS();
    }


/******************************************************************************
 * Thread
 ******************************************************************************/
void InvalidExec()
    {
    FAULT();
    }

static const SFastExecTable DefaultFastExecTable={0,{0}};
static const SSlowExecTable DefaultSlowExecTable={0,(TLinAddr)InvalidExec,0,{{0,0}}};
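/*
 * Illustrative usage sketch, not part of the original file: creating a thread
 * group and attaching a nanothread to it at creation time through
 * SNThreadCreateInfo::iGroup, which NThreadBase::Create() below handles under
 * the scheduler spin locks. KCpuAffinityAny and the exact field set of
 * SNThreadGroupCreateInfo are assumptions here.
 */
#if 0
void CreateGroupedThread(NThreadGroup& aGroup, NThread& aThread, SNThreadCreateInfo& aInfo)
    {
    SNThreadGroupCreateInfo groupInfo;
    groupInfo.iCpuAffinity = KCpuAffinityAny;   // assumed constant
    TInt r = NKern::GroupCreate(&aGroup, groupInfo);
    __NK_ASSERT_ALWAYS(r == KErrNone);

    aInfo.iGroup = &aGroup;                     // joined inside NThreadBase::Create()
    r = NKern::ThreadCreate(&aThread, aInfo);
    __NK_ASSERT_ALWAYS(r == KErrNone);

    // Once every thread has left the group it may be destroyed:
    NKern::GroupDestroy(&aGroup);
    }
#endif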
const SNThreadHandlers NThread_Default_Handlers =
    {
    NTHREAD_DEFAULT_EXIT_HANDLER,
    NTHREAD_DEFAULT_STATE_HANDLER,
    NTHREAD_DEFAULT_EXCEPTION_HANDLER,
    NTHREAD_DEFAULT_TIMEOUT_HANDLER
    };

NThreadWaitState::NThreadWaitState()
    : iTimer(&TimerExpired, this)
    {
    iWtSt64 = 0;
    iTimer.iTriggerTime = 0;
    iTimer.iNTimerSpare1 = 0;
    }

NThreadBase::NThreadBase()
    : iRequestSemaphore(), iWaitState()
    {
    iParent = this;
    iWaitLink.iPriority = 0;
    iBasePri = 0;
    iMutexPri = 0;
    i_NThread_Initial = 0;
    iLinkedObjType = EWaitNone;
    i_ThrdAttr = 0;
    iNThreadBaseSpare10 = 0;
    iFastMutexDefer = 0;
    iRequestSemaphore.iOwningThread = (NThreadBase*)this;
    iTime = 0;
    iTimeslice = 0;
    iSavedSP = 0;
    iAddressSpace = 0;
    iHeldFastMutex = 0;
    iUserModeCallbacks = 0;
    iLinkedObj = 0;
    iNewParent = 0;
    iFastExecTable = 0;
    iSlowExecTable = 0;
    iCsCount = 0;
    iCsFunction = 0;
    iHandlers = 0;
    iSuspendCount = 0;
    iStackBase = 0;
    iStackSize = 0;
    iExtraContext = 0;
    iExtraContextSize = 0;
    iNThreadBaseSpare6 = 0;
    iNThreadBaseSpare7 = 0;
    iNThreadBaseSpare8 = 0;
    iNThreadBaseSpare9 = 0;

    // KILL
    iTag = 0;
    iVemsData = 0;
    }

TInt NThreadBase::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT(">NThreadBase::Create %08x(%08x,%d)", this, &aInfo, aInitial));
    if (aInfo.iPriority<0 || aInfo.iPriority>63)
        return KErrArgument;
    if (aInfo.iPriority==0 && !aInitial)
        return KErrArgument;
//  if (aInfo.iCpu!=KCpuAny && aInfo.iCpu>=TheScheduler.iNumCpus)
//      return KErrArgument;
    iStackBase=(TLinAddr)aInfo.iStackBase;
    iStackSize=aInfo.iStackSize;
    iTimeslice=(aInfo.iTimeslice>0)?aInfo.iTimeslice:-1;
    iTime=iTimeslice;
    iPriority=TUint8(aInfo.iPriority);
    iBasePri=TUint8(aInfo.iPriority);
    iCpuAffinity = aInfo.iCpuAffinity;
    iHandlers = aInfo.iHandlers ? aInfo.iHandlers : &NThread_Default_Handlers;
    iFastExecTable=aInfo.iFastExecTable?aInfo.iFastExecTable:&DefaultFastExecTable;
    iSlowExecTable=(aInfo.iSlowExecTable?aInfo.iSlowExecTable:&DefaultSlowExecTable)->iEntries;
    i_ThrdAttr=(TUint8)aInfo.iAttributes;
    if (aInitial)
        {
        TSubScheduler& ss = SubScheduler();
        iLastCpu = (TUint8)ss.iCpuNum;
        iReady = (TUint8)(iLastCpu | EReadyOffset);
        iCurrent = iReady;
        iCpuAffinity = iLastCpu;
        iEventState = (iLastCpu<<EEventCpuShift)|(iLastCpu<<EThreadCpuShift);
        ss.Add(this);
        i_NThread_Initial = TRUE;
        ss.iInitialThread = (NThread*)this;
        NKern::Unlock();    // now that the current thread is defined
        }
    else
        {
        iSuspendCount = 1;
        iSuspended = 1;
        TInt ecpu;
        if (iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
            {
            ecpu = __e32_find_ls1_32(iCpuAffinity);
            if (ecpu >= TheScheduler.iNumCpus)
                ecpu = 0;   // FIXME: Inactive CPU?
            }
        else
            ecpu = iCpuAffinity;
        iEventState = (ecpu<<EEventCpuShift)|(ecpu<<EThreadCpuShift);
        if (aInfo.iGroup)
            {
            NKern::Lock();
            AcqSLock();
            aInfo.iGroup->AcqSLock();
            iParent = (NSchedulable*)aInfo.iGroup;
            ++aInfo.iGroup->iThreadCount;
            iEventState |= EEventParent;
            RelSLock();
            NKern::Unlock();
            }
        }
    __KTRACE_OPT(KNKERN,DEBUGPRINT("<NThreadBase::Create OK"));
    return KErrNone;
    }

void NThread_Default_State_Handler(NThread* aThread, TInt aOperation, TInt aParameter)
    {
//  __KTRACE_OPT(KPANIC,DEBUGPRINT("Unknown NState: thread %T state %d op %08x par %08x",aThread,aThread->iNState,aOperation,aParameter));
#ifdef _DEBUG
    DEBUGPRINT("UnknownState: thread %T op %08x par %08x",aThread,aOperation,aParameter);
#endif
    FAULT();
    }

void NThread_Default_Exception_Handler(TAny* aContext, NThread*)
    {
    ExcFault(aContext);
    }

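/*
 * Illustrative sketch, not part of the original file: the minimal
 * SNThreadCreateInfo setup accepted by NThreadBase::Create() above. Only the
 * fields consumed by Create() are shown; MyThreadFn and MyStack are
 * hypothetical, and priority 0 is reserved for the initial (null) thread.
 * Note that a non-initial thread is created suspended (iSuspendCount = 1).
 */
#if 0
static TUint32 MyStack[1024];
static void MyThreadFn(TAny* aParam);

TInt CreateWorkerThread(NThread& aThread)
    {
    SNThreadCreateInfo info;
    info.iFunction = &MyThreadFn;
    info.iStackBase = MyStack;
    info.iStackSize = sizeof(MyStack);
    info.iPriority = 16;        // must be 1..63 for a non-initial thread
    info.iTimeslice = -1;       // <=0 means not timesliced (see Create() above)
    info.iAttributes = 0;
    info.iHandlers = 0;         // 0 selects NThread_Default_Handlers
    info.iFastExecTable = 0;    // 0 selects DefaultFastExecTable
    info.iSlowExecTable = 0;    // 0 selects DefaultSlowExecTable
    info.iCpuAffinity = 0;      // affinity encoding assumed: plain CPU number
    info.iGroup = 0;            // not in a thread group
    return NKern::ThreadCreate(&aThread, info);
    }
#endif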
/** Create a nanothread.

    This function is intended to be used by the EPOC kernel and by personality
    layers. A nanothread may not use most of the functions available to normal
    Symbian OS threads. Use Kern::ThreadCreate() to create a Symbian OS thread.

    @param aThread Pointer to control block for thread to create.
    @param aInfo   Information needed for creating the thread.

    @see SNThreadCreateInfo
    @see Kern::ThreadCreate

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
    @pre Kernel must be unlocked.
*/
EXPORT_C TInt NKern::ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadCreate");
    return aThread->Create(aInfo,FALSE);
    }

// User-mode callbacks

TUserModeCallback::TUserModeCallback(TUserModeCallbackFunc aFunc)
    : iNext(KUserModeCallbackUnqueued),
      iFunc(aFunc)
    {
    }

TUserModeCallback::~TUserModeCallback()
    {
    __NK_ASSERT_DEBUG(iNext == KUserModeCallbackUnqueued);
    }

void NKern::CancelUserModeCallbacks()
    {
    // Call any queued callbacks with the EUserModeCallbackCancel reason code, in the current
    // thread.

    TUserModeCallback* listHead =
        (TUserModeCallback*)__e32_atomic_swp_ord_ptr(&NCurrentThread()->iUserModeCallbacks, NULL);
    while (listHead)
        {
        TUserModeCallback* callback = listHead;
        listHead = listHead->iNext;
        callback->iNext = KUserModeCallbackUnqueued;
        __e32_memory_barrier();
        callback->iFunc(callback, EUserModeCallbackCancel);
        }
    }
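/*
 * Illustrative sketch, not part of the original file: the lifecycle of a
 * TUserModeCallback. EUserModeCallbackRun and NKern::QueueUserModeCallback()
 * are assumptions; neither is defined in this file.
 */
#if 0
static void MyCallbackFn(TUserModeCallback* aCallback, TUserModeCallbackReason aReason)
    {
    if (aReason == EUserModeCallbackCancel)
        {
        // Cancelled: the owning thread died or its callbacks were flushed,
        // as in NKern::CancelUserModeCallbacks() above.
        return;
        }
    // Otherwise (assumed reason EUserModeCallbackRun) deliver normally.
    }

static TUserModeCallback TheCallback(&MyCallbackFn);

void QueueIt(NThreadBase* aThread)
    {
    NKern::QueueUserModeCallback(aThread, &TheCallback);    // assumed API
    }
#endif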
void NKern::MoveUserModeCallbacks(NThreadBase* aDestThread, NThreadBase* aSrcThread)
    {
    // Move all queued user-mode callbacks from the source thread to the destination thread, and
    // prevent any more from being queued. Used by the kernel thread code so that callbacks get
    // cancelled in another thread if the thread they were originally queued on dies.

    // Atomically remove list of callbacks and set pointer to 1
    // The latter ensures any subsequent attempts to add callbacks fail
    TUserModeCallback* sourceListStart =
        (TUserModeCallback*)__e32_atomic_swp_ord_ptr(&aSrcThread->iUserModeCallbacks, (TAny*)1);
    __NK_ASSERT_DEBUG(((TUint)sourceListStart & 3) == 0);  // check this only gets called once per thread

    if (sourceListStart == NULL)
        return;

    TUserModeCallback* sourceListEnd = sourceListStart;
    while (sourceListEnd->iNext != NULL)
        sourceListEnd = sourceListEnd->iNext;

    NKern::Lock();
    TUserModeCallback* destListStart = aDestThread->iUserModeCallbacks;
    do
        {
        __NK_ASSERT_DEBUG(((TUint)destListStart & 3) == 0);  // dest thread must not die
        sourceListEnd->iNext = destListStart;
        } while (!__e32_atomic_cas_ord_ptr(&aDestThread->iUserModeCallbacks, &destListStart, sourceListStart));
    NKern::Unlock();
    }

/** Initialise the null thread
    @internalComponent
*/
void NKern::Init(NThread* aThread, SNThreadCreateInfo& aInfo)
    {
    aInfo.iFunction=NULL;           // irrelevant
    aInfo.iPriority=0;              // null thread has lowest priority
    aInfo.iTimeslice=0;             // null thread not timesliced
    aInfo.iAttributes=0;            // null thread does not require implicit locks
    aThread->Create(aInfo,TRUE);    // create the null thread
    }

/** @internalTechnology */
EXPORT_C void NKern::RecordIntLatency(TInt /*aLatency*/, TInt /*aIntMask*/)
    {
    }


/** @internalTechnology */
EXPORT_C void NKern::RecordThreadLatency(TInt /*aLatency*/)
    {
    }

/********************************************
 * Deterministic Priority List Implementation
 ********************************************/


/** Construct a priority list with the specified number of priorities

    @param aNumPriorities The number of priorities (must be 1-64).
*/
EXPORT_C TPriListBase::TPriListBase(TInt aNumPriorities)
    {
    memclr(this, sizeof(TPriListBase)+(aNumPriorities-1)*sizeof(SDblQueLink*) );
    }


/********************************************
 * Miscellaneous
 ********************************************/

/** Get the current value of the high performance counter.

    If a high performance counter is not available, this uses the millisecond
    tick count instead.
*/
EXPORT_C TUint32 NKern::FastCounter()
    {
    return (TUint32)Timestamp();
    }


/** Get the frequency of counter queried by NKern::FastCounter().
*/
EXPORT_C TInt NKern::FastCounterFrequency()
    {
    return (TInt)TimestampFrequency();
    }


extern "C" {
TUint32 CrashState;
}

EXPORT_C TBool NKern::Crashed()
    {
    return CrashState!=0;
    }


/** Returns number of nanokernel timer ticks since system started.
    @return tick count
    @pre any context
*/
EXPORT_C TUint32 NKern::TickCount()
    {
    return NTickCount();
    }

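/*
 * Illustrative sketch, not part of the original file: timing an operation with
 * the fast counter documented above. DoSomething() is a hypothetical workload;
 * unsigned subtraction keeps the delta valid across a single 32-bit wrap.
 */
#if 0
extern void DoSomething();

TUint32 MicrosecondsTaken()
    {
    TUint32 start = NKern::FastCounter();
    DoSomething();
    TUint32 delta = NKern::FastCounter() - start;
    TUint32 freq = (TUint32)NKern::FastCounterFrequency();
    return (TUint32)(((TUint64)delta * 1000000u) / freq);
    }
#endif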
TUint32 BTrace::BigTraceId = 0;

TBool BTrace::DoOutBig(TUint32 a0, TUint32 a1, const TAny* aData, TInt aDataSize, TUint32 aContext, TUint32 aPc)
    {
    SBTraceData& traceData = BTraceData;

    // see if trace is small enough to fit in single record...
    if(TUint(aDataSize)<=TUint(KMaxBTraceDataArray+4))
        {
        a0 += aDataSize;
        TUint32 a2 = 0;
        TUint32 a3 = 0;
        if(aDataSize)
            {
            a2 = *((TUint32*&)aData)++; // first 4 bytes into a2
            if(aDataSize>=4 && aDataSize<=8)
                a3 = *(TUint32*)aData; // only 4 more bytes, so pass by value, not pointer
            else
                a3 = (TUint32)aData;
            }
        __ACQUIRE_BTRACE_LOCK();
        TBool r = traceData.iHandler(a0,0,aContext,a1,a2,a3,0,aPc);
        __RELEASE_BTRACE_LOCK();
        return r;
        }

    // adjust for header2, extra, and size word...
    a0 |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
    a0 += 12;

    TUint32 traceId = __e32_atomic_add_ord32(&BigTraceId, 1);
    TUint32 header2 = BTrace::EMultipartFirst;
    TInt offset = 0;
    do
        {
        TUint32 size = aDataSize-offset;
        if(size>KMaxBTraceDataArray)
            size = KMaxBTraceDataArray;
        else
            header2 = BTrace::EMultipartLast;
        if(size<=4)
            *(TUint32*)&aData = *(TUint32*)aData; // 4 bytes or less are passed by value, not pointer

        __ACQUIRE_BTRACE_LOCK();
        TBool result = traceData.iHandler(a0+size,header2,aContext,aDataSize,a1,(TUint32)aData,traceId,aPc);
        __RELEASE_BTRACE_LOCK();
        if (!result)
            return result;

        offset += size;
        *(TUint8**)&aData += size;

        header2 = BTrace::EMultipartMiddle;
        a1 = offset;
        }
    while(offset<aDataSize);

    return TRUE;
    }
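/*
 * Illustrative sketch, not part of the original file: the shape of a handler
 * on the receiving end of DoOutBig() above. The parameter meanings mirror the
 * iHandler(...) calls made in this file; HandleSmallTrace() and
 * HandleBigTracePart() are hypothetical, and real handlers must also follow
 * the BTrace buffer protocol.
 */
#if 0
extern TBool HandleSmallTrace(TUint32 aHeader, TUint32 aContext, TUint32 a1, TUint32 a2, TUint32 a3, TUint32 aPc);
extern TBool HandleBigTracePart(TUint32 aPart, TUint32 aTraceId, TUint32 aTotalSize, TUint32 aOffsetOrA1, TUint32 aDataPtr);

TBool MyBTraceHandler(TUint32 aHeader, TUint32 aHeader2, TUint32 aContext,
                      TUint32 a1, TUint32 a2, TUint32 a3, TUint32 aExtra, TUint32 aPc)
    {
    // For multipart records DoOutBig() passes: a1 = total payload size,
    // a2 = caller's a1 for the first part then the running offset,
    // a3 = pointer to this part's data, aExtra = trace id linking the parts.
    if (aHeader2 == BTrace::EMultipartFirst ||
        aHeader2 == BTrace::EMultipartMiddle ||
        aHeader2 == BTrace::EMultipartLast)
        return HandleBigTracePart(aHeader2, aExtra, a1, a2, a3);
    // Single-record traces arrive from this path with header2 == 0.
    // Returning FALSE makes DoOutBig() abandon any remaining parts.
    return HandleSmallTrace(aHeader, aContext, a1, a2, a3, aPc);
    }
#endif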