// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\include\nkern\nkern.h
//
// WARNING: This file contains some APIs which are internal and are subject
//          to change without notice. Such APIs should therefore not be used
//          outside the Kernel and Hardware Services package.
//

#ifndef __NKERN_H__
#define __NKERN_H__

#ifdef __STANDALONE_NANOKERNEL__
#undef __IN_KERNEL__
#define __IN_KERNEL__
#endif

// Note: the include targets below are assumed; they supply the basic types,
// intrusive queues, DFCs, kernel trace macros and atomic operations used here.
#include <e32const.h>
#include <nklib.h>
#include <dfcs.h>
#include <nk_trace.h>
#include <e32atomics.h>

extern "C" {
/** @internalComponent */
IMPORT_C void NKFault(const char* file, TInt line);
/** @internalComponent */
void NKIdle(TInt aStage);
}

/**
@publishedPartner
@released
*/
#define FAULT()    NKFault(__FILE__,__LINE__)

#ifdef _DEBUG

/**
@publishedPartner
@released
*/
#define __NK_ASSERT_DEBUG(c)    ((void) ((c)||(FAULT(),0)) )

#else

#define __NK_ASSERT_DEBUG(c)

#endif

/**
@publishedPartner
@released
*/
#define __NK_ASSERT_ALWAYS(c)   ((void) ((c)||(FAULT(),0)) )

/**
@publishedPartner
@released
*/
const TInt KNumPriorities=64;

const TInt KMaxCpus=8;

class NThread;
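/*
 Example (an illustrative sketch; the function below is hypothetical, not part of
 this header): parameter checking with the nanokernel assertion macros.
 __NK_ASSERT_DEBUG compiles away in release builds, while __NK_ASSERT_ALWAYS
 always faults the system via FAULT() if the condition is false.

    void SetPriorityChecked(NThread* aThread, TInt aPriority)
        {
        __NK_ASSERT_DEBUG(aThread != 0);
        __NK_ASSERT_ALWAYS(TUint(aPriority) < TUint(KNumPriorities));
        NKern::ThreadSetPriority(aThread, aPriority);
        }
*/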

/** Spin lock

    Used for protecting a code fragment against both interrupts and concurrent
    execution on another processor.

    @internalComponent
*/
class TSpinLock
    {
public:
    enum TOrder
        {
        // Bit 7 of order clear for locks used with interrupts disabled
        EOrderGenericIrqLow0    =0x00u,     // Device driver spin locks, low range
        EOrderGenericIrqLow1    =0x01u,     // Device driver spin locks, low range
        EOrderGenericIrqLow2    =0x02u,     // Device driver spin locks, low range
        EOrderGenericIrqLow3    =0x03u,     // Device driver spin locks, low range
        EOrderGenericIrqHigh0   =0x18u,     // Device driver spin locks, high range
        EOrderGenericIrqHigh1   =0x19u,     // Device driver spin locks, high range
        EOrderGenericIrqHigh2   =0x1Au,     // Device driver spin locks, high range
        EOrderGenericIrqHigh3   =0x1Bu,     // Device driver spin locks, high range

        // Bit 7 of order set for locks used with interrupts enabled, preemption disabled
        EOrderGenericPreLow0    =0x80u,     // Device driver spin locks, low range
        EOrderGenericPreLow1    =0x81u,     // Device driver spin locks, low range
        EOrderGenericPreHigh0   =0x9Eu,     // Device driver spin locks, high range
        EOrderGenericPreHigh1   =0x9Fu,     // Device driver spin locks, high range

        EOrderNone              =0xFFu      // No order check required (e.g. for dynamic ordering)
        };
public:
    IMPORT_C TSpinLock(TUint aOrder);
private:
    volatile TUint64 iLock;
    };

/** Macro to disable interrupts and acquire the lock.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQ(lock)   ((void)NKern::DisableAllInterrupts())

/** Macro to release the lock and enable interrupts.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQ(lock) (NKern::EnableAllInterrupts())

/** Macro to see if someone else is waiting for the lock, enabling IRQs
    then disabling IRQs again.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQ(lock)  (NKern::EnableAllInterrupts(),(void)NKern::DisableAllInterrupts(),((TBool)TRUE))

/** Macro to remember the original interrupt state, then disable interrupts
    and acquire the lock.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQSAVE(lock)   (NKern::DisableAllInterrupts())

/** Macro to release the lock, then restore the original interrupt state
    supplied.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQRESTORE(lock,irq)  (NKern::RestoreInterrupts(irq))

/** Macro to see if someone else is waiting for the lock, enabling IRQs to
    the original state supplied, then disabling IRQs again.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQRESTORE(lock,irq)   (NKern::RestoreInterrupts(irq),((void)NKern::DisableAllInterrupts()),((TBool)TRUE))

/** Macro to acquire the lock. This assumes the caller has already disabled
    interrupts/preemption.

    If interrupts/preemption are not disabled, a run-time assert will occur.
    This is to protect against unsafe code that might lead to same-core
    deadlock.

    In device driver code it is safer to use __SPIN_LOCK_IRQSAVE() instead,
    although it is not as efficient should interrupts already be disabled for
    the duration the lock is held.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK(lock)

/** Macro to release the lock without changing the interrupt/preemption state.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK(lock)

/**
@internalComponent
*/
#define __SPIN_FLASH(lock)  ((TBool)FALSE)

/** Macro to see if someone else is waiting for the lock, enabling preemption
    then disabling it again.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_PREEMPT(lock)  ((TBool)NKern::PreemptionPoint())
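/*
 Example (an illustrative sketch; the lock, counter and functions are hypothetical):
 a TSpinLock protecting a counter shared between an ISR and thread-side code.
 The ISR path can use the plain __SPIN_LOCK/__SPIN_UNLOCK forms because interrupts
 are already disabled there; thread-side code saves and restores the interrupt state.

    TSpinLock TheCountLock(TSpinLock::EOrderGenericIrqLow0);
    TUint32 TheEventCount;

    void IsrSideIncrement()                 // runs with interrupts already disabled
        {
        __SPIN_LOCK(TheCountLock);
        ++TheEventCount;
        __SPIN_UNLOCK(TheCountLock);
        }

    void ThreadSideIncrement()              // runs in thread context
        {
        TInt irq = __SPIN_LOCK_IRQSAVE(TheCountLock);
        ++TheEventCount;
        __SPIN_UNLOCK_IRQRESTORE(TheCountLock, irq);
        }
*/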

/** Read/Write Spin lock

    @internalComponent
*/
class TRWSpinLock
    {
public:
    IMPORT_C TRWSpinLock(TUint aOrder);     // Uses same order space as TSpinLock
private:
    volatile TUint64 iLock;
    };


/**
@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQ_R(lock)     ((void)NKern::DisableAllInterrupts())

/**
@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQ_R(lock)   (NKern::EnableAllInterrupts())

/**
@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQ_R(lock)    (NKern::EnableAllInterrupts(),(void)NKern::DisableAllInterrupts(),((TBool)TRUE))

/**
@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQ_W(lock)     ((void)NKern::DisableAllInterrupts())

/**
@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQ_W(lock)   (NKern::EnableAllInterrupts())

/**
@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQ_W(lock)    (NKern::EnableAllInterrupts(),(void)NKern::DisableAllInterrupts(),((TBool)TRUE))


/**
@publishedPartner
@prototype
*/
#define __SPIN_LOCK_R(lock)

/**
@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_R(lock)

/**
@internalComponent
*/
#define __SPIN_FLASH_R(lock)    ((TBool)FALSE)

/**
@publishedPartner
@prototype
*/
#define __SPIN_LOCK_W(lock)

/**
@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_W(lock)

/**
@internalComponent
*/
#define __SPIN_FLASH_W(lock)    ((TBool)FALSE)


/**
@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQSAVE_R(lock)     (NKern::DisableAllInterrupts())

/**
@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQRESTORE_R(lock,irq)    (NKern::RestoreInterrupts(irq))

/**
@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQRESTORE_R(lock,irq) (NKern::RestoreInterrupts(irq),((void)NKern::DisableAllInterrupts()),((TBool)TRUE))

/**
@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQSAVE_W(lock)     (NKern::DisableAllInterrupts())

/**
@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQRESTORE_W(lock,irq)    (NKern::RestoreInterrupts(irq))

/**
@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQRESTORE_W(lock,irq) (NKern::RestoreInterrupts(irq),((void)NKern::DisableAllInterrupts()),((TBool)TRUE))


/**
@publishedPartner
@prototype
*/
#define __SPIN_FLASH_PREEMPT_R(lock)    ((TBool)NKern::PreemptionPoint())

/**
@publishedPartner
@prototype
*/
#define __SPIN_FLASH_PREEMPT_W(lock)    ((TBool)NKern::PreemptionPoint())
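/*
 Example (an illustrative sketch; the lock, data and functions are hypothetical):
 read-mostly data guarded by a TRWSpinLock, using the interrupt-saving reader and
 writer macro variants. TRWSpinLock shares its lock-order space with TSpinLock.

    TRWSpinLock TheConfigLock(TSpinLock::EOrderGenericIrqLow1);
    TUint32 TheConfigWord;

    TUint32 ReadConfig()
        {
        TInt irq = __SPIN_LOCK_IRQSAVE_R(TheConfigLock);
        TUint32 value = TheConfigWord;
        __SPIN_UNLOCK_IRQRESTORE_R(TheConfigLock, irq);
        return value;
        }

    void WriteConfig(TUint32 aValue)
        {
        TInt irq = __SPIN_LOCK_IRQSAVE_W(TheConfigLock);
        TheConfigWord = aValue;
        __SPIN_UNLOCK_IRQRESTORE_W(TheConfigLock, irq);
        }
*/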

/** Nanokernel fast semaphore

    A light-weight semaphore class that only supports a single waiting thread,
    suitable for the Symbian OS thread I/O semaphore.

    Initialising an NFastSemaphore involves two steps:

    - Constructing the semaphore
    - Setting the semaphore owning thread (the one allowed to wait on it)

    For example, creating one for the current thread to wait on:

    @code
    NFastSemaphore sem;
    sem.iOwningThread = NKern::CurrentThread();
    @endcode

    @publishedPartner
    @released
*/
class NFastSemaphore
    {
public:
    inline NFastSemaphore();
    inline NFastSemaphore(NThreadBase* aThread);
    IMPORT_C void SetOwner(NThreadBase* aThread);
    IMPORT_C void Wait();
    IMPORT_C void Signal();
    IMPORT_C void SignalN(TInt aCount);
    IMPORT_C void Reset();
    void WaitCancel();
public:
    TInt iCount;                /**< @internalComponent */

    /** The thread allowed to wait on the semaphore
        @internalComponent
    */
    NThreadBase* iOwningThread;
    };

/** Create a fast semaphore

    @publishedPartner
    @released
*/
inline NFastSemaphore::NFastSemaphore()
    : iCount(0), iOwningThread(NULL)
    {}

/** Nanokernel fast mutex

    A light-weight priority-inheritance mutex that can be used if the following
    conditions apply:

    - Threads that hold the mutex never block.
    - The mutex is never acquired in a nested fashion.

    If either of these conditions is not met, a DMutex object is more appropriate.

    @publishedPartner
    @released
*/
class NFastMutex
    {
public:
    IMPORT_C NFastMutex();
    IMPORT_C void Wait();
    IMPORT_C void Signal();
    IMPORT_C TBool HeldByCurrentThread();   /**< @internalComponent */
public:
    NThreadBase* iHoldingThread;    /**< @internalComponent */

    /** MUST ALWAYS BE 0 or 1
        @internalComponent
    */
    TInt iWaiting;
    };
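/*
 Example (an illustrative sketch; the mutex, data and function are hypothetical):
 serialising a short update with a fast mutex. The holder must not block while
 the mutex is held, and fast mutexes are only usable from thread context.

    NFastMutex TheDataLock;
    TInt TheSharedValue;

    void UpdateSharedValue(TInt aNew)
        {
        TheDataLock.Wait();         // acquire; priority inheritance applies to the holder
        TheSharedValue = aNew;      // keep the held section short and non-blocking
        TheDataLock.Signal();       // release
        }
*/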

/**
@publishedPartner
@released

The type of the callback function used by the nanokernel timer.

@see NTimer
*/
typedef void (*NTimerFn)(TAny*);


/**
@publishedPartner
@released

A basic relative timer provided by the nanokernel.

It can generate either a one-shot interrupt or periodic interrupts.

A timeout handler is called when the timer expires, either:
- from the timer ISR - if the timer is queued via OneShot(TInt aTime) or OneShot(TInt aTime, TBool EFalse), or
- from the nanokernel timer DFC thread (dfc1) - if the timer is queued via OneShot(TInt aTime, TBool ETrue), or
- from the DFC thread that the supplied DFC belongs to - if the timer is queued via OneShot(TInt aTime, TDfc& aDfc).
The callback mechanism cannot be changed during the lifetime of a timer.

These timer objects may be manipulated from any context.
The timers are driven from a periodic system tick interrupt,
usually with a 1ms period.

@see NTimerFn
*/
class NTimer : public SDblQueLink
    {
public:
    /**
    Default constructor.
    */
    inline NTimer()
        : iState(EIdle)
        {}
    /**
    Constructor taking a callback function and a pointer to be passed
    to the callback function.

    @param aFunction The callback function.
    @param aPtr      A pointer to be passed to the callback function
                     when called.
    */
    inline NTimer(NTimerFn aFunction, TAny* aPtr)
        : iPtr(aPtr), iFunction(aFunction), iState(EIdle)
        {}
    IMPORT_C TInt OneShot(TInt aTime);
    IMPORT_C TInt OneShot(TInt aTime, TBool aDfc);
    IMPORT_C TInt OneShot(TInt aTime, TDfc& aDfc);
    IMPORT_C TInt Again(TInt aTime);
    IMPORT_C TBool Cancel();
    IMPORT_C TBool IsPending();
public:
    /**
    @internalComponent
    */
    enum TState
        {
        EIdle=0,            // not queued
        ETransferring=1,    // being transferred from holding to ordered queue
        EHolding=2,         // on holding queue
        EOrdered=3,         // on ordered queue
        ECritical=4,        // on ordered queue and in use by queue walk routine
        EFinal=5,           // on final queue
        };
public:
    /** Argument for the callback function, or the pointer to a TDfc */
    TAny* iPtr;             /**< @internalComponent */

    /** Pointer to the callback function. A NULL value indicates that, on completion,
        the supplied DFC is queued instead of a callback function being called. */
    NTimerFn iFunction;     /**< @internalComponent */

    TUint32 iTriggerTime;   /**< @internalComponent */
    TUint8 iCompleteInDfc;  /**< @internalComponent */
    TUint8 iState;          /**< @internalComponent */
    TUint8 iPad1;           /**< @internalComponent */

    /** Available for the timer client to use.
        @internalTechnology */
    TUint8 iUserFlags;
    };

/**
@internalTechnology
*/
#define i_NTimer_iUserFlags iUserFlags

/**
@internalComponent
*/
#define i_NTimer_iState     iState
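/*
 Example (an illustrative sketch; the callback, timer and functions are hypothetical):
 a one-shot timeout of roughly 100 milliseconds. Queued with OneShot(aTime), the
 callback runs in the timer ISR, so it must be short and ISR-safe.

    void TimeoutCallback(TAny* aPtr);               // handles the expiry

    NTimer TheTimeoutTimer(&TimeoutCallback, 0);    // 0 is passed to the callback

    TInt StartTimeout()
        {
        return TheTimeoutTimer.OneShot(NKern::TimerTicks(100));
        }

    void StopTimeout()
        {
        TheTimeoutTimer.Cancel();                   // no effect if not pending
        }
*/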

/**
@publishedPartner
@released
*/
typedef void (*NThreadFunction)(TAny*);

/**
@publishedPartner
@released
*/
typedef TDfc* (*NThreadExitHandler)(NThread*);

/**
@publishedPartner
@released
*/
typedef void (*NThreadStateHandler)(NThread*,TInt,TInt);

/**
@publishedPartner
@released
*/
typedef void (*NThreadExceptionHandler)(TAny*,NThread*);

/**
@publishedPartner
@released
*/
typedef void (*NThreadTimeoutHandler)(NThread*,TInt);

/**
@publishedPartner
@released
*/
struct SNThreadHandlers
    {
    NThreadExitHandler iExitHandler;
    NThreadStateHandler iStateHandler;
    NThreadExceptionHandler iExceptionHandler;
    NThreadTimeoutHandler iTimeoutHandler;
    };

/** @internalComponent */
extern void NThread_Default_State_Handler(NThread*, TInt, TInt);

/** @internalComponent */
extern void NThread_Default_Exception_Handler(TAny*, NThread*);

/** @internalComponent */
#define NTHREAD_DEFAULT_EXIT_HANDLER        ((NThreadExitHandler)0)

/** @internalComponent */
#define NTHREAD_DEFAULT_STATE_HANDLER       (&NThread_Default_State_Handler)

/** @internalComponent */
#define NTHREAD_DEFAULT_EXCEPTION_HANDLER   (&NThread_Default_Exception_Handler)

/** @internalComponent */
#define NTHREAD_DEFAULT_TIMEOUT_HANDLER     ((NThreadTimeoutHandler)0)


/**
@publishedPartner
@released
*/
struct SFastExecTable
    {
    TInt iFastExecCount;            // includes implicit function#0
    TLinAddr iFunction[1];          // first entry is for call number 1
    };

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagClaim=0x80000000;        // claim system lock

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagRelease=0x40000000;      // release system lock

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagPreprocess=0x20000000;   // preprocess

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgMask=0x1C000000; // 3 bits indicating additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs2=0x04000000;   // 2 additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs3=0x08000000;   // 3 additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs4=0x0C000000;   // 4 additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs5=0x10000000;   // 5 additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs6=0x14000000;   // 6 additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs7=0x18000000;   // 7 additional arguments

/**
@publishedPartner
@released
*/
const TUint32 KExecFlagExtraArgs8=0x1C000000;   // 8 additional arguments


/**
@publishedPartner
@released
*/
struct SSlowExecEntry
    {
    TUint32 iFlags;             // information about call
    TLinAddr iFunction;         // address of function to be called
    };


/**
@publishedPartner
@released
*/
struct SSlowExecTable
    {
    TInt iSlowExecCount;
    TLinAddr iInvalidExecHandler;   // used if call number invalid
    TLinAddr iPreprocessHandler;    // used for handle lookups
    SSlowExecEntry iEntries[1];     // first entry is for call number 0
    };

// Thread iAttributes Constants
const TUint8 KThreadAttImplicitSystemLock=1;    /**< @internalComponent */
const TUint8 KThreadAttAddressSpace=2;          /**< @internalComponent */
const TUint8 KThreadAttLoggable=4;              /**< @internalComponent */
const TUint8 KThreadAttDelayed=8;               /**< @internalComponent */


// Thread CPU
const TUint32 KCpuAffinityAny=0xffffffffu;      /**< @internalComponent */

/** Information needed for creating a nanothread.

    @publishedPartner
    @released
*/
struct SNThreadCreateInfo
    {
    NThreadFunction iFunction;
    TAny* iStackBase;
    TInt iStackSize;
    TInt iPriority;
    TInt iTimeslice;
    TUint8 iAttributes;
    TUint32 iCpuAffinity;
    const SNThreadHandlers* iHandlers;
    const SFastExecTable* iFastExecTable;
    const SSlowExecTable* iSlowExecTable;
    const TUint32* iParameterBlock;
    TInt iParameterBlockSize;       // if zero, iParameterBlock _is_ the initial data
                                    // otherwise it points to n bytes of initial data
    };
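/*
 Example (an illustrative sketch; the thread function, stack, handler table and
 priority/timeslice values are hypothetical): filling in SNThreadCreateInfo and
 creating a nanothread with the default handler set.

    void MyThreadFunction(TAny* aParam);

    NThread TheThread;
    TUint32 TheStack[1024/sizeof(TUint32)];

    const SNThreadHandlers TheHandlers =
        {
        NTHREAD_DEFAULT_EXIT_HANDLER,
        NTHREAD_DEFAULT_STATE_HANDLER,
        NTHREAD_DEFAULT_EXCEPTION_HANDLER,
        NTHREAD_DEFAULT_TIMEOUT_HANDLER
        };

    TInt CreateMyThread()
        {
        SNThreadCreateInfo info;
        info.iFunction = &MyThreadFunction;
        info.iStackBase = TheStack;
        info.iStackSize = sizeof(TheStack);
        info.iPriority = 16;                        // 0 .. KNumPriorities-1
        info.iTimeslice = NKern::TimerTicks(20);    // illustrative 20ms timeslice
        info.iAttributes = 0;
        info.iCpuAffinity = KCpuAffinityAny;
        info.iHandlers = &TheHandlers;
        info.iFastExecTable = 0;                    // no exec tables in this sketch
        info.iSlowExecTable = 0;
        info.iParameterBlock = 0;
        info.iParameterBlockSize = 0;               // iParameterBlock used directly
        return NKern::ThreadCreate(&TheThread, info);
        }
*/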

/** Constant for use with NKern:: functions which release a fast mutex as well
    as performing some other operations.

    @publishedPartner
    @released
*/
#define SYSTEM_LOCK     (NFastMutex*)0


/** Idle handler function

    Pointer to a function which is called whenever a CPU goes idle.

    @param  aPtr    The iPtr stored in the SCpuIdleHandler structure.
    @param  aStage  If positive, the number of processors still active;
                    if zero, indicates all processors are now idle;
                    -1 indicates that postamble processing is required after waking up.

    @internalComponent
*/
typedef void (*TCpuIdleHandlerFn)(TAny* aPtr, TInt aStage);

/** Idle handler structure

    @internalComponent
*/
struct SCpuIdleHandler
    {
    TCpuIdleHandlerFn   iHandler;
    TAny*               iPtr;
    volatile TBool      iPostambleRequired;
    };


/**
@internalComponent
*/
enum TUserModeCallbackReason
    {
    EUserModeCallbackRun,
    EUserModeCallbackCancel,
    };


/**
A callback function executed when a thread returns to user mode.

@internalComponent
*/
typedef void (*TUserModeCallbackFunc)(TAny* aThisPtr, TUserModeCallbackReason aReasonCode);


/**
An object representing a queued callback to be executed when a thread returns to user mode.

@internalComponent
*/
class TUserModeCallback
    {
public:
    TUserModeCallback(TUserModeCallbackFunc);
    ~TUserModeCallback();

public:
    TUserModeCallback* volatile iNext;
    TUserModeCallbackFunc iFunc;
    };

TUserModeCallback* const KUserModeCallbackUnqueued = ((TUserModeCallback*)1);
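/*
 Example (an illustrative sketch; the handler and its behaviour are hypothetical):
 the shape of an idle handler matching TCpuIdleHandlerFn, interpreting aStage as
 documented above.

    void VariantIdleHandler(TAny* aPtr, TInt aStage)
        {
        if (aStage == 0)
            {
            // All processors are now idle - a low power state could be entered here.
            }
        else if (aStage < 0)
            {
            // Postamble processing required after waking up.
            }
        else
            {
            // aStage processors are still active.
            }
        }
*/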

/** Nanokernel functions

    @publishedPartner
    @released
*/
class NKern
    {
public:
    /** Bitmask values used when blocking a nanothread.
        @see NKern::Block()
    */
    enum TBlockMode
        {
        EEnterCS=1,     /**< Enter thread critical section before blocking */
        ERelease=2,     /**< Release specified fast mutex before blocking */
        EClaim=4,       /**< Re-acquire specified fast mutex when unblocked */
        EObstruct=8,    /**< Signifies obstruction of thread rather than lack of work to do */
        };

    /** Values that specify the context of the processor.
        @see NKern::CurrentContext()
    */
    enum TContext
        {
        EThread=0,          /**< The processor is in a thread context*/
        EIDFC=1,            /**< The processor is in an IDFC context*/
        EInterrupt=2,       /**< The processor is in an interrupt context*/
        EEscaped=KMaxTInt   /**< Not a valid processor context on target hardware*/
        };

public:
    // Threads
    IMPORT_C static TInt ThreadCreate(NThread* aThread, SNThreadCreateInfo& anInfo);
    IMPORT_C static TBool ThreadSuspend(NThread* aThread, TInt aCount);
    IMPORT_C static TBool ThreadResume(NThread* aThread);
    IMPORT_C static TBool ThreadResume(NThread* aThread, NFastMutex* aMutex);
    IMPORT_C static TBool ThreadForceResume(NThread* aThread);
    IMPORT_C static TBool ThreadForceResume(NThread* aThread, NFastMutex* aMutex);
    IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue);
    IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex);
    IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority);
    IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex);
    IMPORT_C static void ThreadRequestSignal(NThread* aThread);
    IMPORT_C static void ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex);
    IMPORT_C static void ThreadRequestSignal(NThread* aThread, TInt aCount);
    IMPORT_C static void ThreadKill(NThread* aThread);
    IMPORT_C static void ThreadKill(NThread* aThread, NFastMutex* aMutex);
    IMPORT_C static void ThreadEnterCS();
    IMPORT_C static void ThreadLeaveCS();
    static NThread* _ThreadEnterCS();       /**< @internalComponent */
    static void _ThreadLeaveCS();           /**< @internalComponent */
    IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex);
    IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode);
    IMPORT_C static void NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj);
    IMPORT_C static void ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
    IMPORT_C static void ThreadSetUserContext(NThread* aThread, TAny* aContext);
    IMPORT_C static void ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
    static void ThreadModifyUsp(NThread* aThread, TLinAddr aUsp);
    IMPORT_C static TInt FreezeCpu();                                                   /**< @internalComponent */
    IMPORT_C static void EndFreezeCpu(TInt aCookie);                                    /**< @internalComponent */
    IMPORT_C static TUint32 ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity);  /**< @internalComponent */
    IMPORT_C static void ThreadSetTimeslice(NThread* aThread, TInt aTimeslice);         /**< @internalComponent */
    IMPORT_C static TUint64 ThreadCpuTime(NThread* aThread);                            /**< @internalComponent */
    IMPORT_C static TUint32 CpuTimeMeasFreq();                                          /**< @internalComponent */
    static TInt QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback);  /**< @internalComponent */
    static void MoveUserModeCallbacks(NThreadBase* aSrcThread, NThreadBase* aDestThread);   /**< @internalComponent */
    static void CancelUserModeCallbacks();                                              /**< @internalComponent */

    // Fast semaphores
    IMPORT_C static void FSSetOwner(NFastSemaphore* aSem, NThreadBase* aThread);
    IMPORT_C static void FSWait(NFastSemaphore* aSem);
    IMPORT_C static void FSSignal(NFastSemaphore* aSem);
    IMPORT_C static void FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex);
    IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount);
    IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex);

    // Fast mutexes
    IMPORT_C static void FMWait(NFastMutex* aMutex);
    IMPORT_C static void FMSignal(NFastMutex* aMutex);
    IMPORT_C static TBool FMFlash(NFastMutex* aMutex);

    // Scheduler
    IMPORT_C static void Lock();
    IMPORT_C static NThread* LockC();
    IMPORT_C static void Unlock();
    IMPORT_C static TInt PreemptionPoint();

    // Interrupts
    IMPORT_C static TInt DisableAllInterrupts();
    IMPORT_C static TInt DisableInterrupts(TInt aLevel);
    IMPORT_C static void RestoreInterrupts(TInt aRestoreData);
    IMPORT_C static void EnableAllInterrupts();

    // Read-modify-write
    inline static TInt LockedInc(TInt& aCount)
        { return __e32_atomic_add_ord32(&aCount,1); }
    inline static TInt LockedDec(TInt& aCount)
        { return __e32_atomic_add_ord32(&aCount,0xffffffff); }
    inline static TInt LockedAdd(TInt& aDest, TInt aSrc)
        { return __e32_atomic_add_ord32(&aDest,aSrc); }
    inline static TInt64 LockedInc(TInt64& aCount)
        { return __e32_atomic_add_ord64(&aCount,1); }
    inline static TInt64 LockedDec(TInt64& aCount)
        { return __e32_atomic_add_ord64(&aCount,TUint64(TInt64(-1))); }
    inline static TInt64 LockedAdd(TInt64& aDest, TInt64 aSrc)      /**< @internalComponent */
        { return __e32_atomic_add_ord64(&aDest,aSrc); }
    inline static TUint32 LockedSetClear(TUint32& aDest, TUint32 aClearMask, TUint32 aSetMask)
        { return __e32_atomic_axo_ord32(&aDest,~(aClearMask|aSetMask),aSetMask); }
    inline static TUint16 LockedSetClear16(TUint16& aDest, TUint16 aClearMask, TUint16 aSetMask)    /**< @internalComponent */
        { return __e32_atomic_axo_ord16(&aDest,TUint16(~(aClearMask|aSetMask)),aSetMask); }
    inline static TUint8 LockedSetClear8(TUint8& aDest, TUint8 aClearMask, TUint8 aSetMask)
        { return __e32_atomic_axo_ord8(&aDest,TUint8(~(aClearMask|aSetMask)),aSetMask); }
    inline static TInt SafeInc(TInt& aCount)
        { return __e32_atomic_tas_ord32(&aCount,1,1,0); }
    inline static TInt SafeDec(TInt& aCount)
        { return __e32_atomic_tas_ord32(&aCount,1,-1,0); }
    inline static TInt AddIfGe(TInt& aCount, TInt aLimit, TInt aInc)    /**< @internalComponent */
        { return __e32_atomic_tas_ord32(&aCount,aLimit,aInc,0); }
    inline static TInt AddIfLt(TInt& aCount, TInt aLimit, TInt aInc)    /**< @internalComponent */
        { return __e32_atomic_tas_ord32(&aCount,aLimit,0,aInc); }
    inline static TAny* SafeSwap(TAny* aNewValue, TAny*& aPtr)
        { return __e32_atomic_swp_ord_ptr(&aPtr, aNewValue); }
    inline static TUint8 SafeSwap8(TUint8 aNewValue, TUint8& aPtr)
        { return __e32_atomic_swp_ord8(&aPtr, aNewValue); }
    inline static TUint16 SafeSwap16(TUint16 aNewValue, TUint16& aPtr)  /**< @internalComponent */
        { return __e32_atomic_swp_ord16(&aPtr, aNewValue); }
    inline static TBool CompareAndSwap(TAny*& aPtr, TAny* aExpected, TAny* aNew) /**< @internalComponent */
        { return __e32_atomic_cas_ord_ptr(&aPtr, &aExpected, aNew); }
    inline static TBool CompareAndSwap8(TUint8& aPtr, TUint8 aExpected, TUint8 aNew) /**< @internalComponent */
        { return __e32_atomic_cas_ord8(&aPtr, (TUint8*)&aExpected, (TUint8)aNew); }
    inline static TBool CompareAndSwap16(TUint16& aPtr, TUint16 aExpected, TUint16 aNew) /**< @internalComponent */
        { return __e32_atomic_cas_ord16(&aPtr, (TUint16*)&aExpected, (TUint16)aNew); }
    inline static TUint32 SafeSwap(TUint32 aNewValue, TUint32& aPtr)    /**< @internalComponent */
        { return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
    inline static TUint SafeSwap(TUint aNewValue, TUint& aPtr)          /**< @internalComponent */
        { return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
    inline static TInt SafeSwap(TInt aNewValue, TInt& aPtr)             /**< @internalComponent */
        { return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
    inline static TBool CompareAndSwap(TUint32& aPtr, TUint32 aExpected, TUint32 aNew)  /**< @internalComponent */
        { return __e32_atomic_cas_ord32(&aPtr, &aExpected, aNew); }
    inline static TBool CompareAndSwap(TUint& aPtr, TUint aExpected, TUint aNew)        /**< @internalComponent */
        { return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
    inline static TBool CompareAndSwap(TInt& aPtr, TInt aExpected, TInt aNew)           /**< @internalComponent */
        { return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }


    // Miscellaneous
    IMPORT_C static NThread* CurrentThread();
    IMPORT_C static TInt CurrentCpu();                                  /**< @internalComponent */
    IMPORT_C static TInt NumberOfCpus();                                /**< @internalComponent */
    IMPORT_C static void LockSystem();
    IMPORT_C static void UnlockSystem();
    IMPORT_C static TBool FlashSystem();
    IMPORT_C static void WaitForAnyRequest();
    IMPORT_C static void Sleep(TUint32 aTime);
    IMPORT_C static void Exit();
    IMPORT_C static void DeferredExit();
    IMPORT_C static void YieldTimeslice();                              /**< @internalComponent */
    IMPORT_C static void RotateReadyList(TInt aPriority);
    IMPORT_C static void RotateReadyList(TInt aPriority, TInt aCpu);    /**< @internalTechnology */
    IMPORT_C static void RecordIntLatency(TInt aLatency, TInt aIntMask); /**< @internalTechnology */
    IMPORT_C static void RecordThreadLatency(TInt aLatency);            /**< @internalTechnology */
    IMPORT_C static TUint32 TickCount();
    IMPORT_C static TInt TickPeriod();
    IMPORT_C static TInt TimerTicks(TInt aMilliseconds);
    IMPORT_C static TInt TimesliceTicks(TUint32 aMicroseconds);         /**< @internalTechnology */
    IMPORT_C static TInt CurrentContext();
    IMPORT_C static TUint32 FastCounter();
    IMPORT_C static TInt FastCounterFrequency();
    static void Init0(TAny* aVariantData);
    static void Init(NThread* aThread, SNThreadCreateInfo& anInfo);
    IMPORT_C static TBool KernelLocked(TInt aCount=0);                  /**< @internalTechnology */
    IMPORT_C static NFastMutex* HeldFastMutex();                        /**< @internalTechnology */
    static void Idle();
    IMPORT_C static SCpuIdleHandler* CpuIdleHandler();                  /**< @internalTechnology */
    static void NotifyCrash(const TAny* a0, TInt a1);                   /**< @internalTechnology */
    IMPORT_C static TBool Crashed();
    static TUint32 IdleGenerationCount();

    // Debugger support
    typedef void (*TRescheduleCallback)(NThread*);
    IMPORT_C static void SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd);
    IMPORT_C static void InsertSchedulerHooks();
    IMPORT_C static void RemoveSchedulerHooks();
    IMPORT_C static void SetRescheduleCallback(TRescheduleCallback aCallback);
    };
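/*
 Example (an illustrative sketch; the object, counter and functions are hypothetical):
 a lock-free reference count built on the NKern atomic helpers. SafeInc() only
 increments a strictly positive count and returns the previous value, so a result of
 zero means the object was already dead; SafeDec() also returns the previous value,
 letting the caller detect when the last reference has gone.

    struct SRefCounted
        {
        TInt iRefCount;         // 1 when created
        void Cleanup();         // called when the last reference is released
        };

    TBool Open(SRefCounted* aObj)
        {
        return NKern::SafeInc(aObj->iRefCount) > 0; // fails if already at zero
        }

    void Close(SRefCounted* aObj)
        {
        if (NKern::SafeDec(aObj->iRefCount) == 1)
            aObj->Cleanup();                        // last reference just released
        }
*/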

/** Create a fast semaphore

    @publishedPartner
    @released
*/
inline NFastSemaphore::NFastSemaphore(NThreadBase* aThread)
    : iCount(0),
      iOwningThread(aThread ? aThread : (NThreadBase*)NKern::CurrentThread())
    {
    }


#endif