// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\include\nkernsmp\nk_priv.h
//
// WARNING: This file contains some APIs which are internal and are subject
//          to change without notice. Such APIs should therefore not be used
//          outside the Kernel and Hardware Services package.
//

#ifndef __NK_PRIV_H__
#define __NK_PRIV_H__
#include <cpudefs.h>
#include <nkern.h>

#define __USE_BTRACE_LOCK__

class Monitor;

/********************************************
 * Schedulable = thread or thread group
 ********************************************/

/**
@publishedPartner
@prototype

Base class for a nanokernel thread or group
*/
class NThreadGroup;
class NSchedulable : public TPriListLink
	{
public:
	enum
		{
		EReadyGroup=1,
		EReadyCpuMask=0x7f,
		EReadyOffset=0x80,
		};

	enum NReadyFlags
		{
		ENewTimeslice=1,
		EPreferSameCpu=2,
		EUnPause=4,
		};

	enum NEventState
		{
		EEventCountShift=16u,
		EEventCountMask=0xffff0000u,
		EEventCountInc=0x10000u,
		EEventCpuShift=0u,
		EEventCpuMask=0x1fu,
		EThreadCpuShift=8u,
		EThreadCpuMask=0x1f00u,
		EDeferredReady=0x4000u,
		EEventParent=0x8000u,
		};
public:
	NSchedulable();
	void AcqSLock();
	void RelSLock();
	void LAcqSLock();
	void RelSLockU();
	void ReadyT(TUint aMode);					// make ready, assumes lock held
	TInt BeginTiedEvent();
	void EndTiedEvent();
	TInt AddTiedEvent(NEventHandler* aEvent);
	TBool TiedEventReadyInterlock(TInt aCpu);
	void UnPauseT();							// decrement pause count and make ready if necessary
	static void DeferredReadyIDfcFn(TAny*);
	void DetachTiedEvents();
public:
	inline TBool IsGroup()			{return !iParent;}
	inline TBool IsLoneThread()		{return iParent==this;}
	inline TBool IsGroupThread()	{return iParent && iParent!=this;}
public:
//	TUint8				iReady;					/**< @internalComponent */	// flag indicating thread on ready list = cpu number | EReadyOffset
//	TUint8				iCurrent;				/**< @internalComponent */	// flag indicating thread is running
//	TUint8				iLastCpu;				/**< @internalComponent */	// CPU on which this thread last ran
	TUint8				iPauseCount;			/**< @internalComponent */	// count of externally requested pauses extending a voluntary wait
	TUint8				iSuspended;				/**< @internalComponent */	// flag indicating active external suspend (not used for groups)
	TUint8				iNSchedulableSpare1;	/**< @internalComponent */
	TUint8				iNSchedulableSpare2;	/**< @internalComponent */

	TUint8				iCpuChange;				/**< @internalComponent */	// flag showing CPU migration outstanding
	TUint8				iStopping;				/**< @internalComponent */	// thread is exiting, thread group is being destroyed
	TUint16				iFreezeCpu;				/**< @internalComponent */	// flag set if CPU frozen - count for groups
	NSchedulable*		iParent;				/**< @internalComponent */	// pointer to group containing thread, =this for normal thread, =0 for group

	TUint32				iCpuAffinity;			/**< @internalComponent */
	volatile TUint32	iEventState;			/**< @internalComponent */	// bits 16-31=count, 0-4=event CPU, 8-12=thread CPU, 14=deferred ready, 15=parent
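	// Illustrative decode of iEventState using the NEventState masks and
	// shifts defined above (a sketch, not part of the original header):
	//
	//   TUint32 s = iEventState;
	//   TUint count     = (s & EEventCountMask) >> EEventCountShift;	// outstanding tied events
	//   TUint eventCpu  = (s & EEventCpuMask) >> EEventCpuShift;		// CPU tied events run on
	//   TUint threadCpu = (s & EThreadCpuMask) >> EThreadCpuShift;		// CPU the thread runs on
	//   TBool deferred  = (s & EDeferredReady) != 0;					// EDeferredReady flag
	//   TBool parent    = (s & EEventParent) != 0;						// events tied to parent group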
	TSpinLock			iSSpinLock;				/**< @internalComponent */

	SDblQue				iEvents;				/**< @internalComponent */	// doubly-linked list of tied events

	TUint32				i_IDfcMem[sizeof(TDfc)/sizeof(TUint32)];	/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes
//	TDfc				iDeferredReadyIDfc;		/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes

	union
		{
		TUint64			iRunCount64;
		TUint32			iRunCount32[2];
		};
	union
		{
		TUint64			iTotalCpuTime64;		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
		TUint32			iTotalCpuTime32[2];		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
		};
	};

__ASSERT_COMPILE(!(_FOFF(NSchedulable,iSSpinLock)&7));
__ASSERT_COMPILE(!(_FOFF(NSchedulable,iRunCount64)&7));
__ASSERT_COMPILE(!(_FOFF(NSchedulable,iTotalCpuTime64)&7));
__ASSERT_COMPILE(!(sizeof(NSchedulable)&7));


/**
@internalComponent
*/
inline TBool TDfc::IsValid()
	{
	if (iHType < KNumDfcPriorities)
		return TRUE;
	if (iHType != EEventHandlerIDFC)
		return FALSE;
	return !iTied || !iTied->iStopping;
	}

/********************************************
 * Thread
 ********************************************/

/**
@internalComponent
*/
class NThreadWaitState
	{
private:
	enum TWtStFlags
		{
		EWtStWaitPending	=0x01u,		// thread is about to wait
		EWtStWaitActive		=0x02u,		// thread is actually blocked
		EWtStTimeout		=0x04u,		// timeout is active on this wait
		EWtStObstructed		=0x08u,		// wait is due to obstruction (e.g. mutex) rather than lack of work to do
		EWtStDead			=0x80u,		// thread is dead
		};
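	// A sketch of the typical flag sequence, inferred from the flag comments
	// and the method names below (illustrative, not part of the original header):
	//
	//   SetUpWait()  sets EWtStWaitPending (plus EWtStTimeout for timed waits);
	//   DoWait()     promotes EWtStWaitPending to EWtStWaitActive when the
	//                thread actually blocks;
	//   UnBlockT()/ReleaseT() clear the flags and complete the wait;
	//   CancelWait() abandons a pending wait before the thread has blocked.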
private:
	NThreadWaitState();
	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj);
	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout);
	void SetDead(TDfc* aKillDfc);
	void CancelWait();
	TInt DoWait();
	static void TimerExpired(TAny*);
	TInt UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue);
	TUint32 ReleaseT(TAny*& aWaitObj, TInt aReturnValue);
	void CancelTimerT();
private:
	inline NThreadBase* Thread();
	inline TBool WaitPending()
		{ return iWtC.iWtStFlags & (EWtStWaitPending|EWtStDead); }
	inline TBool ThreadIsBlocked()
		{ return iWtC.iWtStFlags & (EWtStWaitActive|EWtStDead); }
	inline TBool ThreadIsDead()
		{ return iWtC.iWtStFlags & EWtStDead; }
private:
	struct S
		{
		volatile TUint8		iWtStFlags;
		volatile TUint8		iWtObjType;
		volatile TUint8		iWtStSpare1;
		volatile TUint8		iWtStSpare2;
		union
			{
			TAny* volatile	iWtObj;
			volatile TInt	iRetVal;
			TDfc* volatile	iKillDfc;
			};
		};
	union
		{
		S					iWtC;
		volatile TUint32	iWtSt32[2];
		volatile TUint64	iWtSt64;
		};
	NTimer					iTimer;
private:
	friend class NSchedulable;
	friend class NThreadBase;
	friend class NThread;
	friend class TScheduler;
	friend class TSubScheduler;
	friend class TDfc;
	friend class TDfcQue;
	friend class NFastSemaphore;
	friend class NFastMutex;
	friend class NTimer;
	friend class NTimerQ;
	friend class NKern;
	friend class Monitor;
	friend class NKTest;
	};

/**
@publishedPartner
@prototype

Base class for a nanokernel thread.
*/
class TSubScheduler;
class NThreadBase : public NSchedulable
	{
public:
	/**
	Defines the possible types of wait object
	*/
	enum NThreadWaitType
		{
		EWaitNone,
		EWaitFastSemaphore,
		EWaitFastMutex,
		EWaitSleep,
		EWaitBlocked,
		EWaitDfc,

		ENumWaitTypes
		};


	/**
	@internalComponent
	*/
	enum NThreadCSFunction
		{
		ECSExitPending=-1,
		ECSExitInProgress=-2,
		ECSDivertPending=-3,
		};

	/**
	@internalComponent
	*/
	enum NThreadTimeoutOp
		{
		ETimeoutPreamble=0,
		ETimeoutPostamble=1,
		ETimeoutSpurious=2,
		};
public:
	NThreadBase();
	TInt Create(SNThreadCreateInfo& anInfo, TBool aInitial);
	void UnReadyT();
	TBool SuspendOrKill(TInt aCount);
	TBool DoSuspendOrKillT(TInt aCount, TSubScheduler* aS);
	TBool CancelTimerT();
	void DoReleaseT(TInt aReturnCode, TUint aMode);
	TBool CheckFastMutexDefer();
	void DoCsFunctionT();
	TBool Resume(TBool aForce);
	IMPORT_C TBool Suspend(TInt aCount);		/**< @internalComponent */
	IMPORT_C TBool Resume();					/**< @internalComponent */
	IMPORT_C TBool ForceResume();				/**< @internalComponent */
	IMPORT_C void Release(TInt aReturnCode, TUint aMode);	/**< @internalComponent */
	IMPORT_C void RequestSignal();				/**< @internalComponent */
	IMPORT_C void SetPriority(TInt aPriority);	/**< @internalComponent */
	void SetMutexPriority(NFastMutex* aMutex);
	void LoseInheritedPriorityT();
	void ChangeReadyThreadPriority();
	TUint32 SetCpuAffinity(TUint32 aAffinity);
	TBool TiedEventLeaveInterlock();
	TBool TiedEventJoinInterlock();
	IMPORT_C void Kill();						/**< @internalComponent */
	void Exit();
	// hooks for platform-specific code
	void OnKill();
	void OnExit();
public:
	static void TimerExpired(TAny* aPtr);

	/** @internalComponent */
	inline void UnknownState(TInt aOp, TInt aParam)
		{ (*iHandlers->iStateHandler)((NThread*)this,aOp,aParam); }

	/** @internalComponent */
	inline TUint8 Attributes()
		{ return i_ThrdAttr; }

	/** @internalComponent */
	inline TUint8 SetAttributes(TUint8 aNewAtt)
		{ return __e32_atomic_swp_ord8(&i_ThrdAttr, aNewAtt); }

	/** @internalComponent */
	inline TUint8 ModifyAttributes(TUint8 aClearMask, TUint8 aSetMask)
		{ return __e32_atomic_axo_ord8(&i_ThrdAttr, (TUint8)~(aClearMask|aSetMask), aSetMask); }

	/** @internalComponent */
	inline void SetAddressSpace(TAny* a)
		{ iAddressSpace=a; }

	/** @internalComponent */
	inline void SetExtraContext(TAny* a, TInt aSize)
		{ iExtraContext = a; iExtraContextSize = aSize; }

	/** @internalTechnology */
	inline TBool IsDead()
		{ return iWaitState.ThreadIsDead(); }
public:
	TPriListLink		iWaitLink;				/**< @internalComponent */	// used to link thread into a wait queue
//	TUint8				iBasePri;				/**< @internalComponent */	// priority with no fast mutex held
//	TUint8				iMutexPri;				/**< @internalComponent */	// priority from held fast mutex
//	TUint8				iInitial;				/**< @internalComponent */	// TRUE if this is an initial thread
	TUint8				iLinkedObjType;
	TUint8				i_ThrdAttr;				/**< @internalComponent */
	TUint8				iNThreadBaseSpare10;
	TUint8				iFastMutexDefer;		/**< @internalComponent */
	NFastSemaphore		iRequestSemaphore;		/**< @internalComponent */

	TInt				iTime;					/**< @internalComponent */	// time remaining, 0 if expired
	TInt				iTimeslice;				/**< @internalComponent */	// timeslice for this thread, -ve = no timeslicing

	TLinAddr			iSavedSP;				/**< @internalComponent */
	TAny*				iAddressSpace;			/**< @internalComponent */

	NFastMutex* volatile iHeldFastMutex;		/**< @internalComponent */	// fast mutex held by this thread
	TUserModeCallback* volatile iUserModeCallbacks;	/**< @internalComponent */	// head of singly-linked list of callbacks
	TAny* volatile		iLinkedObj;				/**< @internalComponent */	// object to which this thread is linked
	NThreadGroup*		iNewParent;				/**< @internalComponent */	// group to join

	const SFastExecTable* iFastExecTable;		/**< @internalComponent */
	const SSlowExecEntry* iSlowExecTable;		/**< @internalComponent */	// points to first entry iEntries[0]

	volatile TInt		iCsCount;				/**< @internalComponent */	// critical section count
	volatile TInt		iCsFunction;			/**< @internalComponent */	// what to do on leaving CS: +n=suspend n times, 0=nothing, -1=exit

	NThreadWaitState	iWaitState;				/**< @internalComponent */

	const SNThreadHandlers* iHandlers;			/**< @internalComponent */	// additional thread event handlers
	TInt				iSuspendCount;			/**< @internalComponent */	// minus the number of times this thread has been suspended

	TLinAddr			iStackBase;				/**< @internalComponent */
	TInt				iStackSize;				/**< @internalComponent */

	TAny*				iExtraContext;			/**< @internalComponent */	// parent FPSCR value (iExtraContextSize == -1), coprocessor context (iExtraContextSize > 0) or NULL
	TInt				iExtraContextSize;		/**< @internalComponent */	// +ve=dynamically allocated, 0=none, -1=iExtraContext stores parent FPSCR value

	TUint32				iNThreadBaseSpare6;		/**< @internalComponent */	// spare to allow growth while preserving BC
	TUint32				iNThreadBaseSpare7;		/**< @internalComponent */	// spare to allow growth while preserving BC
	TUint32				iNThreadBaseSpare8;		/**< @internalComponent */	// spare to allow growth while preserving BC
	TUint32				iNThreadBaseSpare9;		/**< @internalComponent */	// spare to allow growth while preserving BC

	// For EMI support - HOPEFULLY THIS CAN DIE
	TUint32				iTag;					/**< @internalComponent */	// user-defined set of bits, ANDed with a mask when the thread is scheduled to decide whether a DFC should be queued
	TAny*				iVemsData;				/**< @internalComponent */	// may be used by a VEMS to store per-thread data; must be cleaned up before the Thread Exit Monitor completes
	};

__ASSERT_COMPILE(!(_FOFF(NThreadBase,iWaitLink)&7));
__ASSERT_COMPILE(!(sizeof(NThreadBase)&7));
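// How iCsCount and iCsFunction interact, inferred from the member comments and
// the NThreadCSFunction enum above (an illustrative sketch, not part of the
// original header):
//
//   - While iCsCount>0 the thread is in a critical section and cannot be
//     suspended or killed immediately.
//   - A suspend request arriving in that window adds +1 to iCsFunction per
//     requested suspension; a kill sets it to ECSExitPending (-1).
//   - When iCsCount returns to 0, DoCsFunctionT() acts on the deferred
//     request: suspend n times, exit, or do nothing if iCsFunction==0.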
#ifdef __INCLUDE_NTHREADBASE_DEFINES__

#define	iReady				iSpare1				/**< @internalComponent */
#define	iCurrent			iSpare2				/**< @internalComponent */
#define	iLastCpu			iSpare3				/**< @internalComponent */

#define	iBasePri			iWaitLink.iSpare1	/**< @internalComponent */
#define	iMutexPri			iWaitLink.iSpare2	/**< @internalComponent */
#define	i_NThread_Initial	iWaitLink.iSpare3	/**< @internalComponent */

#endif

/** @internalComponent */
#define	i_NThread_BasePri	iWaitLink.iSpare1

/** @internalComponent */
#define	NTHREADBASE_CPU_AFFINITY_MASK	0x80000000
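// Interpretation of a TUint32 CPU affinity value, based on
// NTHREADBASE_CPU_AFFINITY_MASK here and CheckCpuAgainstAffinity() further
// down (illustrative, not part of the original header):
//
//   0x80000000 | 0x0000000B	may run on CPUs 0, 1 and 3 (low bits = CPU bitmask)
//   0x00000002					bound to CPU 2 (plain CPU number)
//
// i.e. with the top bit set the low bits are a mask of allowed CPUs;
// otherwise the whole value is a single CPU number.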
/** @internalComponent */
inline NThreadBase* NThreadWaitState::Thread()
	{ return _LOFF(this, NThreadBase, iWaitState); }

/********************************************
 * Thread group
 ********************************************/

/**
@publishedPartner
@prototype

A group of nanokernel threads which is scheduled as a single unit
*/
class NThreadGroup : public NSchedulable
	{
public:
	NThreadGroup();
public:
	TInt				iThreadCount;						/**< @internalComponent */
	TPriList<NThreadBase, KNumPriorities> iNThreadList;		/**< @internalComponent */
	};

/********************************************
 * Scheduler
 ********************************************/

/**
@internalComponent
*/
class TScheduler;
class NThread;
class NIrqHandler;
class TSubScheduler : public TPriListBase
	{
public:
	TSubScheduler();
	void QueueDfcs();
	void RotateReadyList(TInt aPriority);
	NThread* SelectNextThread();
	TBool QueueEvent(NEventHandler* aEvent);
	void QueueEventAndKick(NEventHandler* aEvent);
	void SaveTimesliceTimer(NThreadBase* aThread);
	void UpdateThreadTimes(NThreadBase* aOld, NThreadBase* aNew);
private:
	SDblQueLink*	iExtraQueues[KNumPriorities-1];
public:
	TSpinLock		iExIDfcLock;				// lock to protect exogenous IDFC queue

	SDblQue			iExIDfcs;					// list of pending exogenous IDFCs (i.e. ones punted over from another CPU)

	SDblQue			iDfcs;						// normal IDFC/DFC pending queue (only accessed by this CPU)

	TDfc* volatile	iCurrentIDFC;				// pointer to IDFC currently running on this CPU
	NThread*		iCurrentThread;				// the thread currently running on this CPU

	TUint32			iCpuNum;
	TUint32			iCpuMask;

	TSpinLock		iReadyListLock;

	volatile TUint8	iRescheduleNeededFlag;		// TRUE if a thread reschedule is pending
	TUint8			iSubSchedulerSBZ1;			// always zero
	volatile TUint8	iDfcPendingFlag;			// TRUE if a normal IDFC is pending
	volatile TUint8	iExIDfcPendingFlag;			// TRUE if an exogenous IDFC is pending
	TInt			iKernLockCount;				// how many times the current CPU has locked the kernel

	TUint8			iInIDFC;					// TRUE if IDFCs are currently being run on this CPU
	volatile TUint8	iEventHandlersPending;		// TRUE if an event handler is pending on this CPU
	TUint8			iSubSchedulerSpare4;
	TUint8			iSubSchedulerSpare5;
	TAny*			iAddressSpace;

	TUint32			iReschedIPIs;
	TScheduler*		iScheduler;

	union
		{
		TUint64		iLastTimestamp64;			// NKern::Timestamp() value at last reschedule or timestamp sync
		TUint32		iLastTimestamp32[2];
		};
	union
		{
		TUint64		iReschedCount64;
		TUint32		iReschedCount32[2];
		};

	TAny*			iExtras[24];				// space for platform-specific extras

	TGenericIPI*	iNextIPI;					// next generic IPI to run on this CPU
	NThread*		iInitialThread;				// initial (idle) thread on this CPU

	TSpinLock		iEventHandlerLock;			// lock to protect event handler queue

	SDblQue			iEventHandlers;				// queue of pending event handlers on this CPU

	TUint64			iSpinLockOrderCheck;		// bitmask showing which spinlock orders currently held

	TUint32			iSubSchedulerPadding[8];
	};

__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iExIDfcLock)&7));
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iEventHandlerLock)&7));
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReadyListLock)&7));
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iLastTimestamp64)&7));
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReschedCount64)&7));
__ASSERT_COMPILE(sizeof(TSubScheduler)==512);	// make it a nice power of 2 size for easy indexing

/**
@internalComponent
*/
class TScheduler
	{
public:
	TScheduler();
	static void Reschedule();
	IMPORT_C static TScheduler* Ptr();
	inline void SetProcessHandler(TLinAddr aHandler) {iProcessHandler=aHandler;}
public:
	TLinAddr		iMonitorExceptionHandler;
	TLinAddr		iProcessHandler;

	TLinAddr		iRescheduleHook;
	TUint32			iActiveCpus1;				// bit n set if CPU n is accepting unlocked threads

	TUint32			iActiveCpus2;				// bit n set if CPU n is accepting generic IPIs
	TInt			iNumCpus;					// number of CPUs under the kernel's control

	TSubScheduler*	iSub[KMaxCpus];				// one subscheduler per CPU

	TAny*			iExtras[24];				// space for platform-specific extras

	NFastMutex		iLock;						// the 'system lock' fast mutex

	TSpinLock		iIdleSpinLock;				// lock to protect list of DFCs to be run on idle

	SDblQue			iIdleDfcs;					// list of DFCs to run when all CPUs go idle

	TUint32			iCpusNotIdle;				// bitmask - bit n set => CPU n is not idle
	TUint8			iIdleGeneration;			// toggles between 0 and 1 each time the iIdleDfcs list is spilled to a CPU IDFC queue
	TUint8			iIdleSpillCpu;				// which CPU last spilled the iIdleDfcs list to its IDFC queue
	TUint8			iTSchedulerSpare1;
	TUint8			iTSchedulerSpare2;

	TUint32			iIdleGenerationCount;		// incremented each time the iIdleDfcs list is spilled to a CPU IDFC queue
	TUint32			i_Scheduler_Padding[3];

	// For EMI support - HOPEFULLY THIS CAN DIE
	NThread*		iSigma;
	TDfc*			iEmiDfc;
	TUint32			iEmiMask;
	TUint32			iEmiState;
	TUint32			iEmiDfcTrigger;
	TBool			iLogging;
	TAny*			iBufferStart;
	TAny*			iBufferEnd;
	TAny*			iBufferTail;
	TAny*			iBufferHead;
	};

__ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleSpinLock)&7));
__ASSERT_COMPILE(sizeof(TScheduler)==512);

extern TScheduler TheScheduler;
extern TSubScheduler TheSubSchedulers[KMaxCpus];

#ifdef __USE_BTRACE_LOCK__
extern TSpinLock BTraceLock;

#define	__ACQUIRE_BTRACE_LOCK()			TInt _btrace_irq = BTraceLock.LockIrqSave()
#define	__RELEASE_BTRACE_LOCK()			BTraceLock.UnlockIrqRestore(_btrace_irq)

#else

#define	__ACQUIRE_BTRACE_LOCK()
#define	__RELEASE_BTRACE_LOCK()

#endif

/**
@internalComponent
*/
extern "C" TSubScheduler& SubScheduler();

/**
@internalComponent
*/
extern "C" void send_resched_ipis(TUint32 aMask);

/**
@internalComponent
*/
extern "C" void send_resched_ipi(TInt aCpu);

/**
@internalComponent
*/
extern "C" void send_resched_ipi_and_wait(TInt aCpu);


#include <nk_plat.h>

/**
Call with kernel locked

@internalComponent
*/
inline void RescheduleNeeded()
	{ SubScheduler().iRescheduleNeededFlag = 1; }


/**
@internalComponent
*/
#define	NCurrentThread()	NKern::CurrentThread()

/** Optimised current thread function which can only be called from places where
	CPU migration is not possible - i.e. with interrupts disabled or preemption
	disabled.

@internalComponent
*/
extern "C" NThread* NCurrentThreadL();
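// TheSubSchedulers above is indexed by CPU number; sizeof(TSubScheduler) is
// asserted to be exactly 512 so the lookup compiles to a shift-and-add rather
// than a multiply. A minimal sketch (SubSchedulerFor is a hypothetical helper,
// not kernel API):
//
//   inline TSubScheduler& SubSchedulerFor(TInt aCpu)
//       { return TheSubSchedulers[aCpu]; }		// address = base + (aCpu << 9)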
sl@0: sl@0: @internalComponent sl@0: */ sl@0: extern "C" NThread* NCurrentThreadL(); sl@0: sl@0: /** @internalComponent */ sl@0: inline TBool CheckCpuAgainstAffinity(TInt aCpu, TUint32 aAffinity) sl@0: { sl@0: if (aAffinity & NTHREADBASE_CPU_AFFINITY_MASK) sl@0: return aAffinity & (1<HeldByCurrentThread()); sl@0: sl@0: /** sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: #define __ASSERT_SYSTEM_LOCK __NK_ASSERT_DEBUG(TScheduler::Ptr()->iLock.HeldByCurrentThread()); sl@0: sl@0: #define __ASSERT_NOT_ISR __NK_ASSERT_DEBUG(NKern::CurrentContext()!=NKern::EInterrupt) sl@0: sl@0: #else sl@0: #define __ASSERT_NO_FAST_MUTEX sl@0: #define __ASSERT_FAST_MUTEX(m) sl@0: #define __ASSERT_SYSTEM_LOCK sl@0: #define __ASSERT_NOT_ISR sl@0: #endif sl@0: sl@0: /******************************************** sl@0: * System timer queue sl@0: ********************************************/ sl@0: sl@0: /** sl@0: @publishedPartner sl@0: @prototype sl@0: */ sl@0: class NTimerQ sl@0: { sl@0: friend class NTimer; sl@0: public: sl@0: typedef void (*TDebugFn)(TAny* aPtr, TInt aPos); /**< @internalComponent */ sl@0: enum { ETimerQMask=31, ENumTimerQueues=32 }; /**< @internalComponent */ // these are not easily modifiable sl@0: sl@0: /** @internalComponent */ sl@0: struct STimerQ sl@0: { sl@0: SDblQue iIntQ; sl@0: SDblQue iDfcQ; sl@0: }; sl@0: public: sl@0: NTimerQ(); sl@0: static void Init1(TInt aTickPeriod); sl@0: static void Init3(TDfcQue* aDfcQ); sl@0: IMPORT_C static TAny* TimerAddress(); sl@0: IMPORT_C void Tick(); sl@0: IMPORT_C static TInt IdleTime(); sl@0: IMPORT_C static void Advance(TInt aTicks); sl@0: private: sl@0: static void DfcFn(TAny* aPtr); sl@0: void Dfc(); sl@0: void Add(NTimer* aTimer); sl@0: void AddFinal(NTimer* aTimer); sl@0: public: sl@0: STimerQ iTickQ[ENumTimerQueues]; /**< @internalComponent */ // NOTE: the order of member data is important sl@0: TUint32 iPresent; /**< @internalComponent */ // The assembler code relies on it sl@0: TUint32 iMsCount; /**< @internalComponent */ sl@0: SDblQue iHoldingQ; /**< @internalComponent */ sl@0: SDblQue iOrderedQ; /**< @internalComponent */ sl@0: SDblQue iCompletedQ; /**< @internalComponent */ sl@0: TDfc iDfc; /**< @internalComponent */ sl@0: TUint8 iTransferringCancelled; /**< @internalComponent */ sl@0: TUint8 iCriticalCancelled; /**< @internalComponent */ sl@0: TUint8 iPad1; /**< @internalComponent */ sl@0: TUint8 iPad2; /**< @internalComponent */ sl@0: TDebugFn iDebugFn; /**< @internalComponent */ sl@0: TAny* iDebugPtr; /**< @internalComponent */ sl@0: TInt iTickPeriod; /**< @internalComponent */ // in microseconds sl@0: sl@0: /** sl@0: This member is intended for use by ASSP/variant interrupt code as a convenient sl@0: location to store rounding error information where hardware interrupts are not sl@0: exactly one millisecond. The Symbian kernel does not make any use of this member. 
sl@0: @publishedPartner sl@0: @prototype sl@0: */ sl@0: TInt iRounding; sl@0: TInt iDfcCompleteCount; /**< @internalComponent */ sl@0: TSpinLock iTimerSpinLock; /**< @internalComponent */ sl@0: }; sl@0: sl@0: __ASSERT_COMPILE(!(_FOFF(NTimerQ,iTimerSpinLock)&7)); sl@0: sl@0: sl@0: GLREF_D NTimerQ TheTimerQ; sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: inline TUint32 NTickCount() sl@0: {return TheTimerQ.iMsCount;} sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: inline TInt NTickPeriod() sl@0: {return TheTimerQ.iTickPeriod;} sl@0: sl@0: sl@0: extern "C" { sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: extern void NKCrashHandler(TInt aPhase, const TAny* a0, TInt a1); sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: extern TUint32 CrashState; sl@0: } sl@0: sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: class TGenIPIList : public SDblQue sl@0: { sl@0: public: sl@0: TGenIPIList(); sl@0: public: sl@0: TSpinLock iGenIPILock; sl@0: }; sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: class TCancelIPI : public TGenericIPI sl@0: { sl@0: public: sl@0: void Send(TDfc* aDfc, TInt aCpu); sl@0: static void Isr(TGenericIPI*); sl@0: public: sl@0: TDfc* volatile iDfc; sl@0: }; sl@0: sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: TBool InterruptsStatus(TBool aRequest); sl@0: sl@0: sl@0: //declarations for the checking of kernel preconditions sl@0: sl@0: /** sl@0: @internalComponent sl@0: sl@0: PRECOND_FUNCTION_CALLER is needed for __ASSERT_WITH_MESSAGE_ALWAYS(), sl@0: so is outside the #ifdef _DEBUG. sl@0: */ sl@0: #ifndef PRECOND_FUNCTION_CALLER sl@0: #define PRECOND_FUNCTION_CALLER 0 sl@0: #endif sl@0: sl@0: #ifdef _DEBUG sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: #define MASK_NO_FAST_MUTEX 0x1 sl@0: #define MASK_CRITICAL 0x2 sl@0: #define MASK_NO_CRITICAL 0x4 sl@0: #define MASK_KERNEL_LOCKED 0x8 sl@0: #define MASK_KERNEL_UNLOCKED 0x10 sl@0: #define MASK_KERNEL_LOCKED_ONCE 0x20 sl@0: #define MASK_INTERRUPTS_ENABLED 0x40 sl@0: #define MASK_INTERRUPTS_DISABLED 0x80 sl@0: #define MASK_SYSTEM_LOCKED 0x100 sl@0: #define MASK_NOT_ISR 0x400 sl@0: #define MASK_NOT_IDFC 0x800 sl@0: #define MASK_NOT_THREAD 0x1000 sl@0: #define MASK_NO_CRITICAL_IF_USER 0x2000 sl@0: #define MASK_THREAD_STANDARD ( MASK_NO_FAST_MUTEX | MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC ) sl@0: #define MASK_THREAD_CRITICAL ( MASK_THREAD_STANDARD | MASK_CRITICAL ) sl@0: #define MASK_ALWAYS_FAIL 0x4000 sl@0: #define MASK_NO_RESCHED 0x8000 sl@0: sl@0: #if defined(__STANDALONE_NANOKERNEL__) || (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__)) sl@0: #define CHECK_PRECONDITIONS(mask,function) sl@0: #define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) sl@0: sl@0: #else sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: extern "C" TInt CheckPreconditions(TUint32 aConditionMask, const char* aFunction, TLinAddr aAddr); sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: #define CHECK_PRECONDITIONS(mask,function) CheckPreconditions(mask,function,PRECOND_FUNCTION_CALLER) sl@0: sl@0: #ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__ sl@0: sl@0: /** sl@0: @internalComponent sl@0: */ sl@0: #define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \ sl@0: __ASSERT_DEBUG( (cond), ( \ sl@0: DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\ sl@0: NKFault(function, 0))) sl@0: sl@0: #else//!__KERNEL_APIS_CONTEXT_CHECKS_FAULT__ sl@0: /** sl@0: 
#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__

/**
@internalComponent
*/
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
			__ASSERT_DEBUG( (cond), ( \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\
			NKFault(function, 0)))

#else//!__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
/**
@internalComponent
*/
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
			__ASSERT_DEBUG( (cond), \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER))


#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
#endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))

#else//if !DEBUG

#define CHECK_PRECONDITIONS(mask,function)
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function)

#endif//_DEBUG

#if (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function)
#else
#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
/**
@internalComponent
*/
#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
			__ASSERT_ALWAYS( (cond), ( \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\
			NKFault(function, 0)))
#else
/**
@internalComponent
*/
#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
			__ASSERT_ALWAYS( (cond), \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER))
#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
#endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))

#endif