First public contribution.
1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\include\nkernsmp\nkern.h
16 // WARNING: This file contains some APIs which are internal and are subject
17 // to change without notice. Such APIs should therefore not be used
18 // outside the Kernel and Hardware Services package.
24 #ifdef __STANDALONE_NANOKERNEL__
34 #include <e32atomics.h>
37 /** @internalComponent */
38 IMPORT_C void NKFault(const char* file, TInt line);
39 /** @internalComponent */
40 void NKIdle(TInt aStage);
// Halt the system, reporting the current source file and line number via NKFault().
47 #define FAULT() NKFault(__FILE__,__LINE__)
// Debug-build assertion: faults the system if condition c is false.
// (The #ifdef _DEBUG / #else directives that select between the two
// __NK_ASSERT_DEBUG definitions are not visible in this excerpt.)
55 #define __NK_ASSERT_DEBUG(c) ((void) ((c)||(FAULT(),0)) )
// Release-build variant: the assertion compiles to nothing.
59 #define __NK_ASSERT_DEBUG(c)
// Assertion checked in all builds: faults the system if condition c is false.
67 #define __NK_ASSERT_ALWAYS(c) ((void) ((c)||(FAULT(),0)) )
// Number of distinct nanothread priority levels (used to size NFastMutex::iWaitQ).
73 const TInt KNumPriorities=64;
// Maximum number of CPUs supported by this SMP nanokernel build.
75 const TInt KMaxCpus=8;
84 Used for protecting a code fragment against both interrupts and concurrent
85 execution on another processor.
87 List of spin locks in the nanokernel, in deadlock-prevention order:
88 A NEventHandler::TiedLock (preemption)
89 B NFastMutex spin locks (preemption)
90 C Thread spin locks (preemption)
91 D Thread group spin locks (preemption)
92 E Per-CPU ready list lock (preemption)
94 a Idle DFC list lock (interrupts)
95 b Per-CPU exogenous IDFC queue lock (interrupts)
96 c NTimerQ spin lock (interrupts)
97 d Generic IPI list locks (interrupts)
98 e NIrq spin locks (interrupts)
99 f Per-CPU event handler list lock (interrupts)
100 z BTrace lock (interrupts)
102 z must be minimum since BTrace can appear anywhere
104 interrupt-disabling spinlocks must be lower than preemption-disabling ones
106 Nestings which actually occur are:
110 Nothing (except possibly z) nested inside a, b, d, f
111 e is held while calling HW-poking functions (which might use other spinlocks)
121 // Bit 7 of order clear for locks used with interrupts disabled
122 EOrderGenericIrqLow0 =0x00u, // Device driver spin locks, low range
123 EOrderGenericIrqLow1 =0x01u, // Device driver spin locks, low range
124 EOrderGenericIrqLow2 =0x02u, // Device driver spin locks, low range
125 EOrderGenericIrqLow3 =0x03u, // Device driver spin locks, low range
126 EOrderBTrace =0x04u, // BTrace lock
127 EOrderEventHandlerList =0x07u, // Per-CPU event handler list lock
128 EOrderCacheMaintenance =0x08u, // CacheMaintenance (for PL310)
129 EOrderNIrq =0x0Au, // NIrq lock
130 EOrderGenericIPIList =0x0Du, // Generic IPI list lock
131 EOrderNTimerQ =0x10u, // Nanokernel timer queue lock
132 EOrderExIDfcQ =0x13u, // Per-CPU exogenous IDFC queue list lock
133 EOrderIdleDFCList =0x16u, // Idle DFC list lock
134 EOrderGenericIrqHigh0 =0x18u, // Device driver spin locks, high range
135 EOrderGenericIrqHigh1 =0x19u, // Device driver spin locks, high range
136 EOrderGenericIrqHigh2 =0x1Au, // Device driver spin locks, high range
137 EOrderGenericIrqHigh3 =0x1Bu, // Device driver spin locks, high range
139 // Bit 7 of order set for locks used with interrupts enabled, preemption disabled
140 EOrderGenericPreLow0 =0x80u, // Device driver spin locks, low range
141 EOrderGenericPreLow1 =0x81u, // Device driver spin locks, low range
142 EOrderReadyList =0x88u, // Per-CPU ready list lock
143 EOrderThreadGroup =0x90u, // Thread group locks
144 EOrderThread =0x91u, // Thread locks
145 EOrderFastMutex =0x98u, // Fast mutex locks
146 EOrderEventHandlerTied =0x9Cu, // Event handler tied lock
147 EOrderGenericPreHigh0 =0x9Eu, // Device driver spin locks, high range
148 EOrderGenericPreHigh1 =0x9Fu, // Device driver spin locks, high range
150 EOrderNone =0xFFu // No order check required (e.g. for dynamic ordering)
153 IMPORT_C TSpinLock(TUint aOrder);
154 IMPORT_C void LockIrq(); /**< @internalComponent disable interrupts and acquire the lock */
155 IMPORT_C void UnlockIrq(); /**< @internalComponent release the lock and enable interrupts */
156 IMPORT_C TBool FlashIrq(); /**< @internalComponent if someone else is waiting for the lock, UnlockIrq() then LockIrq() */
157 IMPORT_C void LockOnly(); /**< @internalComponent acquire the lock, assuming interrupts/preemption already disabled */
158 IMPORT_C void UnlockOnly(); /**< @internalComponent release the lock, don't change interrupt/preemption state */
159 IMPORT_C TBool FlashOnly(); /**< @internalComponent if someone else is waiting for the lock, UnlockOnly() then LockOnly() */
160 IMPORT_C TInt LockIrqSave(); /**< @internalComponent remember original interrupt state then disable interrupts and acquire the lock */
161 IMPORT_C void UnlockIrqRestore(TInt); /**< @internalComponent release the lock then restore original interrupt state */
162 IMPORT_C TBool FlashIrqRestore(TInt); /**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestore() then LockIrq() */
163 IMPORT_C TBool FlashPreempt(); /**< @internalComponent if someone else is waiting for the lock, UnlockOnly(); NKern::PreemptionPoint(); LockOnly(); */
165 volatile TUint64 iLock;
169 /** Macro to disable interrupts and acquire the lock.
174 #define __SPIN_LOCK_IRQ(lock) ((lock).LockIrq())
176 /** Macro to release the lock and enable interrupts.
181 #define __SPIN_UNLOCK_IRQ(lock) (lock).UnlockIrq()
183 /** Macro to see if someone else is waiting for the lock, enabling IRQs
184 then disabling IRQs again.
189 #define __SPIN_FLASH_IRQ(lock) (lock).FlashIrq()
191 /** Macro to remember original interrupt state then disable interrupts
192 and acquire the lock.
197 #define __SPIN_LOCK_IRQSAVE(lock) ((lock).LockIrqSave())
199 /** Macro to release the lock then restore original interrupt state to that
205 #define __SPIN_UNLOCK_IRQRESTORE(lock,irq) (lock).UnlockIrqRestore(irq)
207 /** Macro to see if someone else is waiting for the lock, enabling IRQs to
208 the original state supplied then disabling IRQs again.
213 #define __SPIN_FLASH_IRQRESTORE(lock,irq) (lock).FlashIrqRestore(irq)
215 /** Macro to acquire the lock. This assumes the caller has already disabled
216 interrupts/preemption.
218 If interrupts/preemption is not disabled a run-time assert will occur
219 This is to protect against unsafe code that might lead to same-core
222 In device driver code it is safer to use __SPIN_LOCK_IRQSAVE() instead,
223 although not as efficient should interrupts already be disabled for the
224 duration the lock is held.
229 #define __SPIN_LOCK(lock) ((lock).LockOnly())
231 /** Macro to release the lock, don't change interrupt/preemption state.
236 #define __SPIN_UNLOCK(lock) (lock).UnlockOnly()
241 #define __SPIN_FLASH(lock) (lock).FlashOnly()
243 /** Macro to see if someone else is waiting for the lock, enabling preemption
244 then disabling it again.
249 #define __SPIN_FLASH_PREEMPT(lock) (lock).FlashPreempt()
252 /** Read/Write Spin lock
260 IMPORT_C TRWSpinLock(TUint aOrder); // Uses same order space as TSpinLock
262 IMPORT_C void LockIrqR(); /**< @internalComponent disable interrupts and acquire read lock */
263 IMPORT_C void UnlockIrqR(); /**< @internalComponent release read lock and enable interrupts */
264 IMPORT_C TBool FlashIrqR(); /**< @internalComponent if someone else is waiting for write lock, UnlockIrqR() then LockIrqR() */
265 IMPORT_C void LockIrqW(); /**< @internalComponent disable interrupts and acquire write lock */
266 IMPORT_C void UnlockIrqW(); /**< @internalComponent release write lock and enable interrupts */
267 IMPORT_C TBool FlashIrqW(); /**< @internalComponent if someone else is waiting for the lock, UnlockIrqW() then LockIrqW() */
268 IMPORT_C void LockOnlyR(); /**< @internalComponent acquire read lock, assuming interrupts/preemption already disabled */
269 IMPORT_C void UnlockOnlyR(); /**< @internalComponent release read lock, don't change interrupt/preemption state */
270 IMPORT_C TBool FlashOnlyR(); /**< @internalComponent if someone else is waiting for write lock, UnlockOnlyR() then LockOnlyR() */
271 IMPORT_C void LockOnlyW(); /**< @internalComponent acquire write lock, assuming interrupts/preemption already disabled */
272 IMPORT_C void UnlockOnlyW(); /**< @internalComponent release write lock, don't change interrupt/preemption state */
273 IMPORT_C TBool FlashOnlyW(); /**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW() then LockOnlyW() */
274 IMPORT_C TInt LockIrqSaveR(); /**< @internalComponent disable interrupts and acquire read lock, return original interrupt state */
275 IMPORT_C void UnlockIrqRestoreR(TInt); /**< @internalComponent release read lock and reset original interrupt state */
276 IMPORT_C TBool FlashIrqRestoreR(TInt); /**< @internalComponent if someone else is waiting for write lock, UnlockIrqRestoreR() then LockIrqR() */
277 IMPORT_C TInt LockIrqSaveW(); /**< @internalComponent disable interrupts and acquire write lock, return original interrupt state */
278 IMPORT_C void UnlockIrqRestoreW(TInt); /**< @internalComponent release write lock and reset original interrupt state */
279 IMPORT_C TBool FlashIrqRestoreW(TInt); /**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestoreW() then LockIrqW() */
280 IMPORT_C TBool FlashPreemptR(); /**< @internalComponent if someone else is waiting for write lock, UnlockOnlyR(); NKern::PreemptionPoint(); LockOnlyR(); */
281 IMPORT_C TBool FlashPreemptW(); /**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW(); NKern::PreemptionPoint(); LockOnlyW(); */
283 volatile TUint64 iLock;
291 #define __SPIN_LOCK_IRQ_R(lock) (lock).LockIrqR()
297 #define __SPIN_UNLOCK_IRQ_R(lock) (lock).UnlockIrqR()
303 #define __SPIN_FLASH_IRQ_R(lock) ((lock).FlashIrqR())
309 #define __SPIN_LOCK_IRQ_W(lock) (lock).LockIrqW()
315 #define __SPIN_UNLOCK_IRQ_W(lock) (lock).UnlockIrqW()
321 #define __SPIN_FLASH_IRQ_W(lock) ((lock).FlashIrqW())
328 #define __SPIN_LOCK_R(lock) (lock).LockOnlyR()
334 #define __SPIN_UNLOCK_R(lock) (lock).UnlockOnlyR()
339 #define __SPIN_FLASH_R(lock) ((lock).FlashOnlyR())
345 #define __SPIN_LOCK_W(lock) (lock).LockOnlyW()
351 #define __SPIN_UNLOCK_W(lock) (lock).UnlockOnlyW()
356 #define __SPIN_FLASH_W(lock) ((lock).FlashOnlyW())
363 #define __SPIN_LOCK_IRQSAVE_R(lock) (lock).LockIrqSaveR()
369 #define __SPIN_UNLOCK_IRQRESTORE_R(lock,irq) (lock).UnlockIrqRestoreR(irq)
375 #define __SPIN_FLASH_IRQRESTORE_R(lock,irq) ((lock).FlashIrqRestoreR(irq))
381 #define __SPIN_LOCK_IRQSAVE_W(lock) (lock).LockIrqSaveW()
387 #define __SPIN_UNLOCK_IRQRESTORE_W(lock,irq) (lock).UnlockIrqRestoreW(irq)
393 #define __SPIN_FLASH_IRQRESTORE_W(lock,irq) ((lock).FlashIrqRestoreW(irq))
400 #define __SPIN_FLASH_PREEMPT_R(lock) ((lock).FlashPreemptR())
406 #define __SPIN_FLASH_PREEMPT_W(lock) ((lock).FlashPreemptW())
410 #define __INCLUDE_SPIN_LOCK_CHECKS__
414 /** Nanokernel fast semaphore
416 A light-weight semaphore class that only supports a single waiting thread,
417 suitable for the Symbian OS thread I/O semaphore.
419 Initialising a NFastSemaphore involves two steps:
421 - Constructing the semaphore
422 - Setting the semaphore owning thread (the one allowed to wait on it)
424 For example, creating one for the current thread to wait on:
428 sem.iOwningThread = NKern::CurrentThread();
437 inline NFastSemaphore();
438 inline NFastSemaphore(NThreadBase* aThread);
439 IMPORT_C void SetOwner(NThreadBase* aThread);
440 IMPORT_C void Wait();
441 IMPORT_C void Signal();
442 IMPORT_C void SignalN(TInt aCount);
443 IMPORT_C void Reset();
446 TInt Dec(NThreadBase* aThread); // does mb() if >0
447 NThreadBase* Inc(TInt aCount); // does mb()
448 NThreadBase* DoReset(); // does mb()
450 /** If >=0 the semaphore count
451 If <0, (thread>>2)|0x80000000
456 /** The thread allowed to wait on the semaphore
459 NThreadBase* iOwningThread;
462 /** Create a fast semaphore
467 inline NFastSemaphore::NFastSemaphore()
468 : iCount(0), iOwningThread(NULL)
471 /** Nanokernel fast mutex
473 A light-weight priority-inheritance mutex that can be used if the following
476 - Threads that hold the mutex never block.
477 - The mutex is never acquired in a nested fashion
479 If either of these conditions is not met, a DMutex object is more appropriate.
487 IMPORT_C NFastMutex();
488 IMPORT_C void Wait();
489 IMPORT_C void Signal();
490 IMPORT_C TBool HeldByCurrentThread();
497 /** @internalComponent
499 If mutex is free and no-one is waiting, iHoldingThread=0
500 If mutex is held and no-one is waiting, iHoldingThread points to holding thread
501 If mutex is free but threads are waiting, iHoldingThread=1
502 If mutex is held and threads are waiting, iHoldingThread points to holding thread but with bit 0 set
504 NThreadBase* iHoldingThread;
506 TUint32 i_NFastMutex_Pad1; /**< @internalComponent */
508 /** @internalComponent
510 Spin lock to protect mutex
512 TSpinLock iMutexLock;
514 /** @internalComponent
516 List of NThreads which are waiting for the mutex. The threads are linked via
517 their iWaitLink members.
519 TPriList<NThreadBase, KNumPriorities> iWaitQ;
// Compile-time check that iMutexLock is 8-byte aligned within NFastMutex —
// TSpinLock contains a volatile TUint64, which requires 8-byte alignment.
522 __ASSERT_COMPILE(!(_FOFF(NFastMutex,iMutexLock)&7));
529 The type of the callback function used by the nanokernel timer.
533 typedef NEventFn NTimerFn;
542 A basic relative timer provided by the nanokernel.
544 It can generate either a one-shot interrupt or periodic interrupts.
546 A timeout handler is called when the timer expires, either:
547 - from the timer ISR - if the timer is queued via OneShot(TInt aTime) or OneShot(TInt aTime, TBool EFalse), or
548 - from the nanokernel timer dfc1 thread - if the timer is queued via OneShot(TInt aTime, TBool ETrue) call, or
549 - from any other dfc thread that provided DFC belongs to - if the timer is queued via OneShot(TInt aTime, TDfc& aDfc) call.
550 Call-back mechanism cannot be changed in the lifetime of a timer.
552 These timer objects may be manipulated from any context.
553 The timers are driven from a periodic system tick interrupt,
554 usually a 1ms period.
559 class NTimer : public NEventHandler
567 iHType = EEventHandlerNTimer;
568 i8888.iHState1 = EIdle;
571 Constructor taking a callback function and a pointer to be passed
572 to the callback function.
574 @param aFunction The callback function.
575 @param aPtr A pointer to be passed to the callback function
578 inline NTimer(NTimerFn aFunction, TAny* aPtr)
582 iHType = EEventHandlerNTimer;
583 i8888.iHState1 = EIdle;
585 IMPORT_C NTimer(NSchedulable* aTied, NTimerFn aFunction, TAny* aPtr);
586 IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TInt aPriority); // create DFC, queue to be set later
587 IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority); // create DFC
588 IMPORT_C void SetDfcQ(TDfcQue* aDfcQ);
590 IMPORT_C TInt SetTied(NSchedulable* aTied);
591 IMPORT_C TInt OneShot(TInt aTime);
592 IMPORT_C TInt OneShot(TInt aTime, TBool aDfc);
593 IMPORT_C TInt OneShot(TInt aTime, TDfc& aDfc);
594 IMPORT_C TInt Again(TInt aTime);
595 IMPORT_C TBool Cancel();
596 IMPORT_C TBool IsPending();
598 enum { ECancelDestroy=1 };
600 inline TBool IsNormal()
601 { return iHType==EEventHandlerNTimer; }
602 inline TBool IsMutating()
603 { return iHType<KNumDfcPriorities; }
604 inline TBool IsValid()
605 { return iHType<KNumDfcPriorities || iHType==EEventHandlerNTimer; }
607 TUint DoCancel(TUint aFlags);
608 void DoCancel0(TUint aState);
609 TBool DoCancelMutating(TUint aFlags);
616 EIdle=0, // not queued
617 // 1 skipped so as not to clash with DFC states
618 ETransferring=2, // being transferred from holding to ordered queue
619 EHolding=3, // on holding queue
620 EOrdered=4, // on ordered queue
621 ECritical=5, // on ordered queue and in use by queue walk routine
622 EFinal=6, // on final queue
623 EEventQ=32, // 32+n = on event queue of CPU n (for tied timers)
626 TUint32 iTriggerTime; /**< @internalComponent */
627 TUint32 iNTimerSpare1; /**< @internalComponent */
629 /** This field is available for use by the timer client provided that
630 the timer isn't a mutating-into-DFC timer.
631 @internalTechnology */
632 // TUint8 iUserFlags; // i8888.iHState0
633 // TUint8 iState; /**< @internalComponent */ // i8888.iHState1
634 // TUint8 iCompleteInDfc; /**< @internalComponent */ // i8888.iHState2
637 friend class NTimerQ;
638 friend class NSchedulable;
644 #define i_NTimer_iUserFlags i8888.iHState0
649 #define i_NTimer_iState i8888.iHState1
/** Entry-point function for a nanothread; receives the TAny* supplied at creation. */
655 typedef void (*NThreadFunction)(TAny*);
/** Handler called when a nanothread exits; returns a TDfc pointer
    (presumably a cleanup DFC, or NULL — confirm against the scheduler's exit path). */
661 typedef TDfc* (*NThreadExitHandler)(NThread*);
/** Handler called on nanothread state changes; the two TInt parameters carry
    the operation and its operand. */
667 typedef void (*NThreadStateHandler)(NThread*,TInt,TInt);
/** Handler called when a nanothread takes an exception. */
673 typedef void (*NThreadExceptionHandler)(TAny*,NThread*);
/** Handler called when a nanothread's wait times out; the TInt carries
    timeout context (exact meaning not visible in this excerpt). */
679 typedef void (*NThreadTimeoutHandler)(NThread*,TInt);
685 struct SNThreadHandlers
687 NThreadExitHandler iExitHandler;
688 NThreadStateHandler iStateHandler;
689 NThreadExceptionHandler iExceptionHandler;
690 NThreadTimeoutHandler iTimeoutHandler;
693 /** @internalComponent */
694 extern void NThread_Default_State_Handler(NThread*, TInt, TInt);
696 /** @internalComponent */
697 extern void NThread_Default_Exception_Handler(TAny*, NThread*);
699 /** @internalComponent */
700 #define NTHREAD_DEFAULT_EXIT_HANDLER ((NThreadExitHandler)0)
702 /** @internalComponent */
703 #define NTHREAD_DEFAULT_STATE_HANDLER (&NThread_Default_State_Handler)
705 /** @internalComponent */
706 #define NTHREAD_DEFAULT_EXCEPTION_HANDLER (&NThread_Default_Exception_Handler)
708 /** @internalComponent */
709 #define NTHREAD_DEFAULT_TIMEOUT_HANDLER ((NThreadTimeoutHandler)0)
716 struct SFastExecTable
718 TInt iFastExecCount; // includes implicit function#0
719 TLinAddr iFunction[1]; // first entry is for call number 1
// Flag bits for slow exec call entries (see SSlowExecEntry::iFlags), telling
// the dispatcher how to treat each call: system lock claim/release,
// preprocessing, and the number of additional arguments passed through.
726 const TUint32 KExecFlagClaim=0x80000000; // claim system lock
732 const TUint32 KExecFlagRelease=0x40000000; // release system lock
738 const TUint32 KExecFlagPreprocess=0x20000000; // preprocess
744 const TUint32 KExecFlagExtraArgMask=0x1C000000; // 3 bits indicating additional arguments
750 const TUint32 KExecFlagExtraArgs2=0x04000000; // 2 additional arguments
756 const TUint32 KExecFlagExtraArgs3=0x08000000; // 3 additional arguments
762 const TUint32 KExecFlagExtraArgs4=0x0C000000; // 4 additional arguments
768 const TUint32 KExecFlagExtraArgs5=0x10000000; // 5 additional arguments
774 const TUint32 KExecFlagExtraArgs6=0x14000000; // 6 additional arguments
780 const TUint32 KExecFlagExtraArgs7=0x18000000; // 7 additional arguments
786 const TUint32 KExecFlagExtraArgs8=0x1C000000; // 8 additional arguments
793 struct SSlowExecEntry
795 TUint32 iFlags; // information about call
796 TLinAddr iFunction; // address of function to be called
804 struct SSlowExecTable
807 TLinAddr iInvalidExecHandler; // used if call number invalid
808 TLinAddr iPreprocessHandler; // used for handle lookups
809 SSlowExecEntry iEntries[1]; // first entry is for call number 0
812 // Thread iAttributes Constants
// Bit flags recorded in a thread's attribute field.
813 const TUint8 KThreadAttImplicitSystemLock=1; /**< @internalComponent */
814 const TUint8 KThreadAttAddressSpace=2; /**< @internalComponent */
815 const TUint8 KThreadAttLoggable=4; /**< @internalComponent */
// Special CPU affinity value: all bits set, meaning the thread is not bound
// to any particular CPU.
819 const TUint32 KCpuAffinityAny=0xffffffffu; /**< @internalComponent */
821 /** Information needed for creating a nanothread.
826 struct SNThreadCreateInfo
828 NThreadFunction iFunction;
834 TUint32 iCpuAffinity;
835 const SNThreadHandlers* iHandlers;
836 const SFastExecTable* iFastExecTable;
837 const SSlowExecTable* iSlowExecTable;
838 const TUint32* iParameterBlock;
839 TInt iParameterBlockSize; // if zero, iParameterBlock _is_ the initial data
840 // otherwise it points to n bytes of initial data
841 NThreadGroup* iGroup; // NULL for lone thread
844 /** Information needed for creating a nanothread group.
849 struct SNThreadGroupCreateInfo
851 TUint32 iCpuAffinity;
854 /** Constant for use with NKern:: functions which release a fast mutex as well
855 as performing some other operations.
// A null NFastMutex pointer is presumably interpreted by those functions as
// "the system lock" — confirm against NKern::LockSystem/UnlockSystem.
860 #define SYSTEM_LOCK (NFastMutex*)0
863 /** Idle handler function
864 Pointer to a function which is called whenever a CPU goes idle
866 @param aPtr The iPtr stored in the SCpuIdleHandler structure
867 @param aStage If positive, the number of processors still active
868 If zero, indicates all processors are now idle
869 -1 indicates that postamble processing is required after waking up
874 typedef void (*TCpuIdleHandlerFn)(TAny* aPtr, TInt aStage);
876 /** Idle handler structure
881 struct SCpuIdleHandler
883 TCpuIdleHandlerFn iHandler;
885 volatile TBool iPostambleRequired;
892 enum TUserModeCallbackReason
894 EUserModeCallbackRun,
895 EUserModeCallbackCancel,
900 A callback function executed when a thread returns to user mode.
904 typedef void (*TUserModeCallbackFunc)(TAny* aThisPtr, TUserModeCallbackReason aReasonCode);
908 An object representing a queued callback to be executed when a thread returns to user mode.
912 class TUserModeCallback
915 TUserModeCallback(TUserModeCallbackFunc);
916 ~TUserModeCallback();
919 TUserModeCallback* volatile iNext;
920 TUserModeCallbackFunc iFunc;
// Sentinel value stored in TUserModeCallback::iNext to mark a callback that is
// not currently on any queue (distinct from NULL, which presumably terminates
// the queue's linked list — TODO confirm).
923 TUserModeCallback* const KUserModeCallbackUnqueued = ((TUserModeCallback*)1);
926 /** Main function for AP
931 typedef void (*TAPBootFunc)(volatile SAPBootInfo*);
934 /** Information needed to boot an AP
940 TUint32 iCpu; // Hardware CPU ID
941 TUint32 iInitStackSize; // Size of initial stack
942 TLinAddr iInitStackBase; // Base of initial stack
943 TAPBootFunc iMain; // Address of initial function to call
// Interrupt service routine function type; receives the TAny* supplied to
// NKern::InterruptBind().
947 typedef void (*NIsr)(TAny*);
949 /** Nanokernel functions
957 /** Bitmask values used when blocking a nanothread.
962 EEnterCS=1, /**< Enter thread critical section before blocking */
963 ERelease=2, /**< Release specified fast mutex before blocking */
964 EClaim=4, /**< Re-acquire specified fast mutex when unblocked */
965 EObstruct=8, /**< Signifies obstruction of thread rather than lack of work to do */
968 /** Values that specify the context of the processor.
969 @see NKern::CurrentContext()
973 EThread=0, /**< The processor is in a thread context*/
974 EIDFC=1, /**< The processor is in an IDFC context*/
975 EInterrupt=2, /**< The processor is in an interrupt context*/
976 EEscaped=KMaxTInt /**< Not valid a process context on target hardware*/
981 IMPORT_C static TInt ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo);
982 IMPORT_C static TBool ThreadSuspend(NThread* aThread, TInt aCount);
983 IMPORT_C static TBool ThreadResume(NThread* aThread);
984 IMPORT_C static TBool ThreadResume(NThread* aThread, NFastMutex* aMutex);
985 IMPORT_C static TBool ThreadForceResume(NThread* aThread);
986 IMPORT_C static TBool ThreadForceResume(NThread* aThread, NFastMutex* aMutex);
987 IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue);
988 IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex);
989 IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority);
990 IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex);
991 IMPORT_C static void ThreadRequestSignal(NThread* aThread);
992 IMPORT_C static void ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex);
993 IMPORT_C static void ThreadRequestSignal(NThread* aThread, TInt aCount);
994 IMPORT_C static void ThreadKill(NThread* aThread);
995 IMPORT_C static void ThreadKill(NThread* aThread, NFastMutex* aMutex);
996 IMPORT_C static void ThreadEnterCS();
997 IMPORT_C static void ThreadLeaveCS();
998 static NThread* _ThreadEnterCS(); /**< @internalComponent */
999 static void _ThreadLeaveCS(); /**< @internalComponent */
1000 IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex);
1001 IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode);
1002 IMPORT_C static void NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj);
1003 IMPORT_C static void ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
1004 IMPORT_C static void ThreadSetUserContext(NThread* aThread, TAny* aContext);
1005 IMPORT_C static void ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
1006 static void ThreadModifyUsp(NThread* aThread, TLinAddr aUsp);
1007 IMPORT_C static TInt FreezeCpu(); /**< @internalComponent */
1008 IMPORT_C static void EndFreezeCpu(TInt aCookie); /**< @internalComponent */
1009 IMPORT_C static TUint32 ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity); /**< @internalComponent */
1010 IMPORT_C static void ThreadSetTimeslice(NThread* aThread, TInt aTimeslice); /**< @internalComponent */
1011 IMPORT_C static TUint64 ThreadCpuTime(NThread* aThread); /**< @internalComponent */
1012 IMPORT_C static TUint32 CpuTimeMeasFreq(); /**< @internalComponent */
1013 static TInt QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback); /**< @internalComponent */
1014 static void MoveUserModeCallbacks(NThreadBase* aSrcThread, NThreadBase* aDestThread); /**< @internalComponent */
1015 static void CancelUserModeCallbacks(); /**< @internalComponent */
1018 IMPORT_C static TInt GroupCreate(NThreadGroup* aGroup, SNThreadGroupCreateInfo& aInfo);
1019 IMPORT_C static void GroupDestroy(NThreadGroup* aGroup);
1020 IMPORT_C static NThreadGroup* CurrentGroup();
1021 IMPORT_C static NThreadGroup* LeaveGroup();
1022 IMPORT_C static void JoinGroup(NThreadGroup* aGroup);
1023 IMPORT_C static TUint32 GroupSetCpuAffinity(NThreadGroup* aGroup, TUint32 aAffinity);
1026 IMPORT_C static void FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread);
1027 IMPORT_C static void FSWait(NFastSemaphore* aSem);
1028 IMPORT_C static void FSSignal(NFastSemaphore* aSem);
1029 IMPORT_C static void FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex);
1030 IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount);
1031 IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex);
1034 IMPORT_C static void FMWait(NFastMutex* aMutex);
1035 IMPORT_C static void FMSignal(NFastMutex* aMutex);
1036 IMPORT_C static TBool FMFlash(NFastMutex* aMutex);
1039 IMPORT_C static void Lock();
1040 IMPORT_C static NThread* LockC();
1041 IMPORT_C static void Unlock();
1042 IMPORT_C static TInt PreemptionPoint();
1045 IMPORT_C static TInt DisableAllInterrupts();
1046 IMPORT_C static TInt DisableInterrupts(TInt aLevel);
1047 IMPORT_C static void RestoreInterrupts(TInt aRestoreData);
1048 IMPORT_C static void EnableAllInterrupts();
1050 // Read-modify-write
1051 inline static TInt LockedInc(TInt& aCount)
1052 { return __e32_atomic_add_ord32(&aCount,1); }
1053 inline static TInt LockedDec(TInt& aCount)
1054 { return __e32_atomic_add_ord32(&aCount,0xffffffff); }
1055 inline static TInt LockedAdd(TInt& aDest, TInt aSrc)
1056 { return __e32_atomic_add_ord32(&aDest,aSrc); }
1057 inline static TInt64 LockedInc(TInt64& aCount)
1058 { return __e32_atomic_add_ord64(&aCount,1); }
1059 inline static TInt64 LockedDec(TInt64& aCount)
1060 { return __e32_atomic_add_ord64(&aCount,TUint64(TInt64(-1))); }
1061 inline static TInt64 LockedAdd(TInt64& aDest, TInt64 aSrc) /**< @internalComponent */
1062 { return __e32_atomic_add_ord64(&aDest,aSrc); }
1063 inline static TUint32 LockedSetClear(TUint32& aDest, TUint32 aClearMask, TUint32 aSetMask)
1064 { return __e32_atomic_axo_ord32(&aDest,~(aClearMask|aSetMask),aSetMask); }
1065 inline static TUint16 LockedSetClear16(TUint16& aDest, TUint16 aClearMask, TUint16 aSetMask) /**< @internalComponent */
1066 { return __e32_atomic_axo_ord16(&aDest,TUint16(~(aClearMask|aSetMask)),aSetMask); }
1067 inline static TUint8 LockedSetClear8(TUint8& aDest, TUint8 aClearMask, TUint8 aSetMask)
1068 { return __e32_atomic_axo_ord8(&aDest,TUint8(~(aClearMask|aSetMask)),aSetMask); }
1069 inline static TInt SafeInc(TInt& aCount)
1070 { return __e32_atomic_tas_ord32(&aCount,1,1,0); }
1071 inline static TInt SafeDec(TInt& aCount)
1072 { return __e32_atomic_tas_ord32(&aCount,1,-1,0); }
1073 inline static TInt AddIfGe(TInt& aCount, TInt aLimit, TInt aInc) /**< @internalComponent */
1074 { return __e32_atomic_tas_ord32(&aCount,aLimit,aInc,0); }
1075 inline static TInt AddIfLt(TInt& aCount, TInt aLimit, TInt aInc) /**< @internalComponent */
1076 { return __e32_atomic_tas_ord32(&aCount,aLimit,0,aInc); }
1077 inline static TAny* SafeSwap(TAny* aNewValue, TAny*& aPtr)
1078 { return __e32_atomic_swp_ord_ptr(&aPtr, aNewValue); }
1079 inline static TUint8 SafeSwap8(TUint8 aNewValue, TUint8& aPtr)
1080 { return __e32_atomic_swp_ord8(&aPtr, aNewValue); }
1081 inline static TUint16 SafeSwap16(TUint16 aNewValue, TUint16& aPtr) /**< @internalComponent */
1082 { return __e32_atomic_swp_ord16(&aPtr, aNewValue); }
1083 inline static TBool CompareAndSwap(TAny*& aPtr, TAny* aExpected, TAny* aNew) /**< @internalComponent */
1084 { return __e32_atomic_cas_ord_ptr(&aPtr, &aExpected, aNew); }
1085 inline static TBool CompareAndSwap8(TUint8& aPtr, TUint8 aExpected, TUint8 aNew) /**< @internalComponent */
1086 { return __e32_atomic_cas_ord8(&aPtr, (TUint8*)&aExpected, (TUint8)aNew); }
1087 inline static TBool CompareAndSwap16(TUint16& aPtr, TUint16 aExpected, TUint16 aNew) /**< @internalComponent */
1088 { return __e32_atomic_cas_ord16(&aPtr, (TUint16*)&aExpected, (TUint16)aNew); }
1089 inline static TUint32 SafeSwap(TUint32 aNewValue, TUint32& aPtr) /**< @internalComponent */
1090 { return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
1091 inline static TUint SafeSwap(TUint aNewValue, TUint& aPtr) /**< @internalComponent */
1092 { return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
1093 inline static TInt SafeSwap(TInt aNewValue, TInt& aPtr) /**< @internalComponent */
1094 { return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
1095 inline static TBool CompareAndSwap(TUint32& aPtr, TUint32 aExpected, TUint32 aNew) /**< @internalComponent */
1096 { return __e32_atomic_cas_ord32(&aPtr, &aExpected, aNew); }
1097 inline static TBool CompareAndSwap(TUint& aPtr, TUint aExpected, TUint aNew) /**< @internalComponent */
1098 { return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
1099 inline static TBool CompareAndSwap(TInt& aPtr, TInt aExpected, TInt aNew) /**< @internalComponent */
1100 { return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
1104 IMPORT_C static NThread* CurrentThread();
1105 IMPORT_C static TInt CurrentCpu(); /**< @internalComponent */
1106 IMPORT_C static TInt NumberOfCpus(); /**< @internalComponent */
1107 IMPORT_C static void LockSystem();
1108 IMPORT_C static void UnlockSystem();
1109 IMPORT_C static TBool FlashSystem();
1110 IMPORT_C static void WaitForAnyRequest();
1111 IMPORT_C static void Sleep(TUint32 aTime);
1112 IMPORT_C static void Exit();
1113 IMPORT_C static void DeferredExit();
1114 IMPORT_C static void YieldTimeslice(); /**< @internalComponent */
1115 IMPORT_C static void RotateReadyList(TInt aPriority);
1116 IMPORT_C static void RotateReadyList(TInt aPriority, TInt aCpu); /**< @internalTechnology */
1117 IMPORT_C static void RecordIntLatency(TInt aLatency, TInt aIntMask); /**< @internalTechnology */
1118 IMPORT_C static void RecordThreadLatency(TInt aLatency); /**< @internalTechnology */
1119 IMPORT_C static TUint32 TickCount();
1120 IMPORT_C static TInt TickPeriod();
1121 IMPORT_C static TInt TimerTicks(TInt aMilliseconds);
1122 IMPORT_C static TInt TimesliceTicks(TUint32 aMicroseconds); /**< @internalTechnology */
1123 IMPORT_C static TInt CurrentContext();
1124 IMPORT_C static TUint32 FastCounter();
1125 IMPORT_C static TInt FastCounterFrequency();
1126 IMPORT_C static TUint64 Timestamp();
1127 IMPORT_C static TUint32 TimestampFrequency();
1128 static void Init0(TAny* aVariantData);
1129 static void Init(NThread* aThread, SNThreadCreateInfo& aInfo);
1130 static TInt BootAP(volatile SAPBootInfo* aInfo);
1131 IMPORT_C static TBool KernelLocked(TInt aCount=0); /**< @internalTechnology */
1132 IMPORT_C static NFastMutex* HeldFastMutex(); /**< @internalTechnology */
1134 IMPORT_C static SCpuIdleHandler* CpuIdleHandler(); /**< @internalTechnology */
1135 static void NotifyCrash(const TAny* a0, TInt a1); /**< @internalTechnology */
1136 IMPORT_C static TBool Crashed();
1137 static TUint32 IdleGenerationCount();
1140 typedef void (*TRescheduleCallback)(NThread*);
1141 IMPORT_C static void SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd);
1142 IMPORT_C static void InsertSchedulerHooks();
1143 IMPORT_C static void RemoveSchedulerHooks();
1144 IMPORT_C static void SetRescheduleCallback(TRescheduleCallback aCallback);
1149 EIrqInit_FallingEdge=0,
1150 EIrqInit_RisingEdge=2,
1151 EIrqInit_LevelLow=1,
1152 EIrqInit_LevelHigh=3,
1153 EIrqInit_Shared=0x10,
1154 EIrqInit_Count=0x20,
1161 EIrqBind_Exclusive=4,
1167 EIrqIndexMask = 0x0000ffff, // bottom 16 bits is IRQ number if top 16 bits all zero
1168 // otherwise is IRQ handler index
1169 EIrqCookieMask = 0x7fff0000,
1170 EIrqCookieShift = 16
1173 static void InterruptInit0();
1174 IMPORT_C static TInt InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt=0);
1175 IMPORT_C static TInt InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied);
1176 IMPORT_C static TInt InterruptUnbind(TInt aId);
1177 IMPORT_C static TInt InterruptEnable(TInt aId);
1178 IMPORT_C static TInt InterruptDisable(TInt aId);
1179 IMPORT_C static TInt InterruptClear(TInt aId);
1180 IMPORT_C static TInt InterruptSetPriority(TInt aId, TInt aPri);
1181 IMPORT_C static TInt InterruptSetCpuMask(TInt aId, TUint32 aMask);
1182 IMPORT_C static void Interrupt(TInt aIrqNo);
1186 /** Create a fast semaphore
1191 inline NFastSemaphore::NFastSemaphore(NThreadBase* aThread)
1193 iOwningThread(aThread ? aThread : (NThreadBase*)NKern::CurrentThread())
// Function invoked on each target CPU when a TGenericIPI is queued
// (presumably in interrupt context — confirm against TGenericIPI::Queue).
1203 typedef void (*TGenericIPIFn)(TGenericIPI*);
1208 class TGenericIPI : public SDblQueLink
1211 void Queue(TGenericIPIFn aFunc, TUint32 aCpuMask);
1212 void QueueAll(TGenericIPIFn aFunc);
1213 void QueueAllOther(TGenericIPIFn aFunc);
1215 void WaitCompletion();
1217 TGenericIPIFn iFunc;
1218 volatile TUint32 iCpusIn;
1219 volatile TUint32 iCpusOut;
1225 class TStopIPI : public TGenericIPI
1230 static void Isr(TGenericIPI*);
1232 volatile TInt iFlag;