     1 // Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     2 // All rights reserved.
     3 // This component and the accompanying materials are made available
     4 // under the terms of the License "Eclipse Public License v1.0"
     5 // which accompanies this distribution, and is available
     6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
     7 //
     8 // Initial Contributors:
     9 // Nokia Corporation - initial contribution.
    10 //
    11 // Contributors:
    12 //
    13 // Description:
    14 // e32\include\nkernsmp\nkern.h
    15 // 
    16 // WARNING: This file contains some APIs which are internal and are subject
    17 //          to change without notice. Such APIs should therefore not be used
    18 //          outside the Kernel and Hardware Services package.
    19 //
    20 
    21 #ifndef __NKERN_H__
    22 #define __NKERN_H__
    23 
    24 #ifdef	__STANDALONE_NANOKERNEL__
    25 #undef	__IN_KERNEL__
    26 #define	__IN_KERNEL__
    27 #endif
    28 
    29 #include <e32const.h>
    30 #include <nklib.h>
    31 #include <nk_event.h>
    32 #include <dfcs.h>
    33 #include <nk_trace.h>
    34 #include <e32atomics.h>
    35 
    36 extern "C" {
    37 /** @internalComponent */
    38 IMPORT_C void NKFault(const char* file, TInt line);
    39 /** @internalComponent */
    40 void NKIdle(TInt aStage);
    41 }
    42 
    43 /**
    44 @publishedPartner
    45 @released
    46 */
    47 #define FAULT()		NKFault(__FILE__,__LINE__)
    48 
    49 #ifdef _DEBUG
    50 
    51 /**
    52 @publishedPartner
    53 @released
    54 */
    55 #define __NK_ASSERT_DEBUG(c)	((void) ((c)||(FAULT(),0)) )
    56 
    57 #else
    58 
    59 #define __NK_ASSERT_DEBUG(c)
    60 
    61 #endif
    62 
    63 /**
    64 @publishedPartner
    65 @released
    66 */
    67 #define __NK_ASSERT_ALWAYS(c)	((void) ((c)||(FAULT(),0)) )
    68 
    69 /**
    70 	@publishedPartner
    71 	@released
    72 */
    73 const TInt KNumPriorities=64;
    74 
    75 const TInt KMaxCpus=8;
    76 
    77 class NSchedulable;
    78 class NThread;
    79 class NThreadGroup;
    80 
    81 
    82 /** Spin lock
    83 
    84 	Used for protecting a code fragment against both interrupts and concurrent
    85 	execution on another processor.
    86 
    87 	List of spin locks in the nanokernel, in deadlock-prevention order:
    88 	A	NEventHandler::TiedLock (preemption)
    89 	B	NFastMutex spin locks (preemption)
    90 	C	Thread spin locks (preemption)
    91 	D	Thread group spin locks (preemption)
    92 	E	Per-CPU ready list lock (preemption)
    93 
    94 	a	Idle DFC list lock (interrupts)
    95 	b	Per-CPU exogenous IDFC queue lock (interrupts)
    96 	c	NTimerQ spin lock (interrupts)
    97 	d	Generic IPI list locks (interrupts)
    98 	e	NIrq spin locks (interrupts)
    99 	f	Per-CPU event handler list lock (interrupts)
   100 	z	BTrace lock (interrupts)
   101 
   102 	z must be minimum since BTrace can appear anywhere
   103 
   104 	interrupt-disabling spinlocks must be lower than preemption-disabling ones
   105 
   106 	Nestings which actually occur are:
   107 		A > C
   108 		B > C > D > E
   109 		c > f
   110 		Nothing (except possibly z) nested inside a, b, d, f
   111 		e is held while calling HW-poking functions (which might use other spinlocks)
   112 
   113 @publishedPartner
   114 @prototype
   115 */
   116 class TSpinLock
   117 	{
   118 public:
   119 	enum TOrder
   120 		{
   121 		// Bit 7 of order clear for locks used with interrupts disabled
   122 		EOrderGenericIrqLow0	=0x00u,		// Device driver spin locks, low range
   123 		EOrderGenericIrqLow1	=0x01u,		// Device driver spin locks, low range
   124 		EOrderGenericIrqLow2	=0x02u,		// Device driver spin locks, low range
   125 		EOrderGenericIrqLow3	=0x03u,		// Device driver spin locks, low range
   126 		EOrderBTrace			=0x04u,		// BTrace lock
   127 		EOrderEventHandlerList	=0x07u,		// Per-CPU event handler list lock
   128 		EOrderCacheMaintenance  =0x08u,		// CacheMaintenance (for PL310)
   129 		EOrderNIrq				=0x0Au,		// NIrq lock
   130 		EOrderGenericIPIList	=0x0Du,		// Generic IPI list lock
   131 		EOrderNTimerQ			=0x10u,		// Nanokernel timer queue lock
   132 		EOrderExIDfcQ			=0x13u,		// Per-CPU exogenous IDFC queue list lock
   133 		EOrderIdleDFCList		=0x16u,		// Idle DFC list lock
   134 		EOrderGenericIrqHigh0	=0x18u,		// Device driver spin locks, high range
   135 		EOrderGenericIrqHigh1	=0x19u,		// Device driver spin locks, high range
   136 		EOrderGenericIrqHigh2	=0x1Au,		// Device driver spin locks, high range
   137 		EOrderGenericIrqHigh3	=0x1Bu,		// Device driver spin locks, high range
   138 
   139 		// Bit 7 of order set for locks used with interrupts enabled, preemption disabled
   140 		EOrderGenericPreLow0	=0x80u,		// Device driver spin locks, low range
   141 		EOrderGenericPreLow1	=0x81u,		// Device driver spin locks, low range
   142 		EOrderReadyList			=0x88u,		// Per-CPU ready list lock
   143 		EOrderThreadGroup		=0x90u,		// Thread group locks
   144 		EOrderThread			=0x91u,		// Thread locks
   145 		EOrderFastMutex			=0x98u,		// Fast mutex locks
   146 		EOrderEventHandlerTied	=0x9Cu,		// Event handler tied lock
   147 		EOrderGenericPreHigh0	=0x9Eu,		// Device driver spin locks, high range
   148 		EOrderGenericPreHigh1	=0x9Fu,		// Device driver spin locks, high range
   149 
   150 		EOrderNone				=0xFFu		// No order check required (e.g. for dynamic ordering)
   151 		};
   152 public:
   153 	IMPORT_C TSpinLock(TUint aOrder);
   154 	IMPORT_C void LockIrq();				/**< @internalComponent disable interrupts and acquire the lock */
   155 	IMPORT_C void UnlockIrq();				/**< @internalComponent release the lock and enable interrupts */
   156 	IMPORT_C TBool FlashIrq();				/**< @internalComponent if someone else is waiting for the lock, UnlockIrq() then LockIrq() */
   157 	IMPORT_C void LockOnly();				/**< @internalComponent acquire the lock, assuming interrupts/preemption already disabled */
   158 	IMPORT_C void UnlockOnly();				/**< @internalComponent release the lock, don't change interrupt/preemption state */
   159 	IMPORT_C TBool FlashOnly();				/**< @internalComponent if someone else is waiting for the lock, UnlockOnly() then LockOnly() */
   160 	IMPORT_C TInt LockIrqSave();			/**< @internalComponent remember original interrupt state then disable interrupts and acquire the lock */
   161 	IMPORT_C void UnlockIrqRestore(TInt);	/**< @internalComponent release the lock then restore original interrupt state */
   162 	IMPORT_C TBool FlashIrqRestore(TInt);	/**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestore() then LockIrq() */
   163 	IMPORT_C TBool FlashPreempt();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnly(); NKern::PreemptionPoint(); LockOnly(); */
   164 private:
   165 	volatile TUint64 iLock;
   166 	};
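/* Usage sketch (editorial addition): a minimal example of guarding data shared with
   an ISR using the LockIrqSave()/UnlockIrqRestore() pair declared above. The lock
   order value and the guarded counter are invented for illustration.
@code
	TSpinLock DriverLock(TSpinLock::EOrderGenericIrqLow0);
	TUint32 SharedCount = 0;

	void BumpSharedCount()
		{
		TInt irq = DriverLock.LockIrqSave();	// disable interrupts, then take the lock
		++SharedCount;							// short, bounded critical section
		DriverLock.UnlockIrqRestore(irq);		// release, then restore the interrupt state
		}
@endcode
*/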
   167 
   168 
   169 /** Macro to disable interrupts and acquire the lock.
   170 
   171 @publishedPartner
   172 @prototype
   173 */
   174 #define __SPIN_LOCK_IRQ(lock)				((lock).LockIrq())
   175 
   176 /** Macro to release the lock and enable interrupts.
   177 
   178 @publishedPartner
   179 @prototype
   180 */
   181 #define __SPIN_UNLOCK_IRQ(lock)				(lock).UnlockIrq()
   182 
    183 /** Macro to check whether another CPU is waiting for the lock; if so, the lock
    184     is released, IRQs are briefly enabled, then disabled again and the lock re-acquired.
   185 
   186 @publishedPartner
   187 @prototype
   188 */
   189 #define __SPIN_FLASH_IRQ(lock)				(lock).FlashIrq()
   190 
   191 /** Macro to remember original interrupt state then disable interrupts 
   192     and acquire the lock.
   193     
   194 @publishedPartner
   195 @prototype
   196 */
   197 #define __SPIN_LOCK_IRQSAVE(lock)			((lock).LockIrqSave())
   198 
    199 /** Macro to release the lock, then restore the original interrupt state to
    200 	that supplied.
   201 	
   202 @publishedPartner
   203 @prototype
   204 */
   205 #define __SPIN_UNLOCK_IRQRESTORE(lock,irq)	(lock).UnlockIrqRestore(irq)
   206 
    207 /** Macro to check whether another CPU is waiting for the lock; if so, the lock
    208 	is released, the supplied interrupt state restored, then IRQs disabled again and the lock re-acquired.
   209     
   210 @publishedPartner
   211 @prototype
   212 */
   213 #define __SPIN_FLASH_IRQRESTORE(lock,irq)	(lock).FlashIrqRestore(irq)
   214 
    215 /** Macro to acquire the lock. This assumes the caller has already disabled
    216     interrupts/preemption.
    217 	
    218 	If interrupts/preemption are not disabled, a run-time assertion will occur.
    219 	This protects against unsafe code that might lead to a same-core
    220 	deadlock.
    221 	
    222     In device driver code it is safer to use __SPIN_LOCK_IRQSAVE() instead,
    223 	although it is less efficient if interrupts are already disabled for the
    224 	duration for which the lock is held.
   225     
   226 @publishedPartner
   227 @prototype
   228 */
   229 #define __SPIN_LOCK(lock)					((lock).LockOnly())
   230 
   231 /** Macro to release the lock, don't change interrupt/preemption state.
   232 
   233 @publishedPartner
   234 @prototype
   235 */
   236 #define __SPIN_UNLOCK(lock)					(lock).UnlockOnly()
   237 
   238 /**
   239 @internalComponent
   240 */
   241 #define __SPIN_FLASH(lock)					(lock).FlashOnly()
   242 
    243 /** Macro to check whether another CPU is waiting for the lock; if so, the lock
    244     is released, preemption briefly enabled, then disabled again and the lock re-acquired.
   245 
   246 @publishedPartner
   247 @prototype
   248 */
   249 #define __SPIN_FLASH_PREEMPT(lock)			(lock).FlashPreempt()
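/* Usage sketch (editorial addition): the same pattern expressed through the macros
   above, for a lock taken with interrupts enabled but preemption disabled. Using
   NKern::Lock()/Unlock() (declared later in this header) is one way to satisfy the
   precondition of __SPIN_LOCK(); the lock and the data touched are illustrative.
@code
	TSpinLock QLock(TSpinLock::EOrderGenericPreLow0);

	void TouchQueue()
		{
		NKern::Lock();				// disable preemption first
		__SPIN_LOCK(QLock);			// safe: preemption is already disabled
		// ... brief manipulation of state shared between CPUs ...
		__SPIN_UNLOCK(QLock);
		NKern::Unlock();
		}
@endcode
*/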
   250 
   251 
   252 /** Read/Write Spin lock
   253 
   254 @publishedPartner
   255 @prototype
   256 */
   257 class TRWSpinLock
   258 	{
   259 public:
   260 	IMPORT_C TRWSpinLock(TUint aOrder);		// Uses same order space as TSpinLock
   261 
   262 	IMPORT_C void LockIrqR();				/**< @internalComponent disable interrupts and acquire read lock */
   263 	IMPORT_C void UnlockIrqR();				/**< @internalComponent release read lock and enable interrupts */
   264 	IMPORT_C TBool FlashIrqR();				/**< @internalComponent if someone else is waiting for write lock, UnlockIrqR() then LockIrqR() */
   265 	IMPORT_C void LockIrqW();				/**< @internalComponent disable interrupts and acquire write lock */
   266 	IMPORT_C void UnlockIrqW();				/**< @internalComponent release write lock and enable interrupts */
   267 	IMPORT_C TBool FlashIrqW();				/**< @internalComponent if someone else is waiting for the lock, UnlockIrqW() then LockIrqW() */
   268 	IMPORT_C void LockOnlyR();				/**< @internalComponent acquire read lock, assuming interrupts/preemption already disabled */
   269 	IMPORT_C void UnlockOnlyR();			/**< @internalComponent release read lock, don't change interrupt/preemption state */
   270 	IMPORT_C TBool FlashOnlyR();			/**< @internalComponent if someone else is waiting for write lock, UnlockOnlyR() then LockOnlyR() */
   271 	IMPORT_C void LockOnlyW();				/**< @internalComponent acquire write lock, assuming interrupts/preemption already disabled */
   272 	IMPORT_C void UnlockOnlyW();			/**< @internalComponent release write lock, don't change interrupt/preemption state */
   273 	IMPORT_C TBool FlashOnlyW();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW() then LockOnlyW() */
   274 	IMPORT_C TInt LockIrqSaveR();			/**< @internalComponent disable interrupts and acquire read lock, return original interrupt state */
   275 	IMPORT_C void UnlockIrqRestoreR(TInt);	/**< @internalComponent release read lock and reset original interrupt state */
   276 	IMPORT_C TBool FlashIrqRestoreR(TInt);	/**< @internalComponent if someone else is waiting for write lock, UnlockIrqRestoreR() then LockIrqR() */
   277 	IMPORT_C TInt LockIrqSaveW();			/**< @internalComponent disable interrupts and acquire write lock, return original interrupt state */
   278 	IMPORT_C void UnlockIrqRestoreW(TInt);	/**< @internalComponent release write lock and reset original interrupt state */
   279 	IMPORT_C TBool FlashIrqRestoreW(TInt);	/**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestoreW() then LockIrqW() */
   280 	IMPORT_C TBool FlashPreemptR();			/**< @internalComponent if someone else is waiting for write lock, UnlockOnlyR(); NKern::PreemptionPoint(); LockOnlyR(); */
   281 	IMPORT_C TBool FlashPreemptW();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW(); NKern::PreemptionPoint(); LockOnlyW(); */
   282 private:
   283 	volatile TUint64 iLock;
   284 	};
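/* Usage sketch (editorial addition): a reader/writer split over a hypothetical
   configuration word; several readers may hold the R side concurrently while the
   W side is exclusive. The lock order and data are invented for illustration.
@code
	TRWSpinLock ConfigLock(TSpinLock::EOrderGenericIrqLow1);	// shares TSpinLock's order space
	TUint32 ConfigWord;

	TUint32 ReadConfig()
		{
		TInt irq = ConfigLock.LockIrqSaveR();	// concurrent with other readers
		TUint32 v = ConfigWord;
		ConfigLock.UnlockIrqRestoreR(irq);
		return v;
		}

	void WriteConfig(TUint32 aValue)
		{
		TInt irq = ConfigLock.LockIrqSaveW();	// exclusive against readers and writers
		ConfigWord = aValue;
		ConfigLock.UnlockIrqRestoreW(irq);
		}
@endcode
*/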
   285 
   286 
   287 /**
   288 @publishedPartner
   289 @prototype
   290 */
   291 #define __SPIN_LOCK_IRQ_R(lock)					(lock).LockIrqR()
   292 
   293 /**
   294 @publishedPartner
   295 @prototype
   296 */
   297 #define __SPIN_UNLOCK_IRQ_R(lock)				(lock).UnlockIrqR()
   298 
   299 /**
   300 @publishedPartner
   301 @prototype
   302 */
   303 #define __SPIN_FLASH_IRQ_R(lock)				((lock).FlashIrqR())
   304 
   305 /**
   306 @publishedPartner
   307 @prototype
   308 */
   309 #define __SPIN_LOCK_IRQ_W(lock)					(lock).LockIrqW()
   310 
   311 /**
   312 @publishedPartner
   313 @prototype
   314 */
   315 #define __SPIN_UNLOCK_IRQ_W(lock)				(lock).UnlockIrqW()
   316 
   317 /**
   318 @publishedPartner
   319 @prototype
   320 */
   321 #define __SPIN_FLASH_IRQ_W(lock)				((lock).FlashIrqW())
   322 
   323 
   324 /**
   325 @publishedPartner
   326 @prototype
   327 */
   328 #define __SPIN_LOCK_R(lock)						(lock).LockOnlyR()
   329 
   330 /**
   331 @publishedPartner
   332 @prototype
   333 */
   334 #define __SPIN_UNLOCK_R(lock)					(lock).UnlockOnlyR()
   335 
   336 /**
   337 @internalComponent
   338 */
   339 #define __SPIN_FLASH_R(lock)					((lock).FlashOnlyR())
   340 
   341 /**
   342 @publishedPartner
   343 @prototype
   344 */
   345 #define __SPIN_LOCK_W(lock)						(lock).LockOnlyW()
   346 
   347 /**
   348 @publishedPartner
   349 @prototype
   350 */
   351 #define __SPIN_UNLOCK_W(lock)					(lock).UnlockOnlyW()
   352 
   353 /**
   354 @internalComponent
   355 */
   356 #define __SPIN_FLASH_W(lock)					((lock).FlashOnlyW())
   357 
   358 
   359 /**
   360 @publishedPartner
   361 @prototype
   362 */
   363 #define __SPIN_LOCK_IRQSAVE_R(lock)				(lock).LockIrqSaveR()
   364 
   365 /**
   366 @publishedPartner
   367 @prototype
   368 */
   369 #define __SPIN_UNLOCK_IRQRESTORE_R(lock,irq)	(lock).UnlockIrqRestoreR(irq)
   370 
   371 /**
   372 @publishedPartner
   373 @prototype
   374 */
   375 #define __SPIN_FLASH_IRQRESTORE_R(lock,irq)		((lock).FlashIrqRestoreR(irq))
   376 
   377 /**
   378 @publishedPartner
   379 @prototype
   380 */
   381 #define __SPIN_LOCK_IRQSAVE_W(lock)				(lock).LockIrqSaveW()
   382 
   383 /**
   384 @publishedPartner
   385 @prototype
   386 */
   387 #define __SPIN_UNLOCK_IRQRESTORE_W(lock,irq)	(lock).UnlockIrqRestoreW(irq)
   388 
   389 /**
   390 @publishedPartner
   391 @prototype
   392 */
   393 #define __SPIN_FLASH_IRQRESTORE_W(lock,irq)		((lock).FlashIrqRestoreW(irq))
   394 
   395 
   396 /**
   397 @publishedPartner
   398 @prototype
   399 */
   400 #define __SPIN_FLASH_PREEMPT_R(lock)			((lock).FlashPreemptR())
   401 
   402 /**
   403 @publishedPartner
   404 @prototype
   405 */
   406 #define __SPIN_FLASH_PREEMPT_W(lock)			((lock).FlashPreemptW())
   407 
   408 
   409 #ifdef _DEBUG
   410 #define __INCLUDE_SPIN_LOCK_CHECKS__
   411 #endif
   412 
   413 
   414 /** Nanokernel fast semaphore
   415 
   416 	A light-weight semaphore class that only supports a single waiting thread,
   417 	suitable for the Symbian OS thread I/O semaphore.
   418 	
   419 	Initialising a NFastSemaphore involves two steps:
   420 	
   421 	- Constructing the semaphore
   422 	- Setting the semaphore owning thread (the one allowed to wait on it)
   423 	
   424 	For example, creating one for the current thread to wait on:
   425 	
   426 	@code
   427 	NFastSemaphore sem;
   428 	sem.iOwningThread = NKern::CurrentThread();
   429 	@endcode
   430 	
   431 	@publishedPartner
   432 	@prototype
   433 */
   434 class NFastSemaphore
   435 	{
   436 public:
   437 	inline NFastSemaphore();
   438 	inline NFastSemaphore(NThreadBase* aThread);
   439 	IMPORT_C void SetOwner(NThreadBase* aThread);
   440 	IMPORT_C void Wait();
   441 	IMPORT_C void Signal();
   442 	IMPORT_C void SignalN(TInt aCount);
   443 	IMPORT_C void Reset();
   444 	void WaitCancel();
   445 
   446 	TInt Dec(NThreadBase* aThread);	// does mb() if >0
   447 	NThreadBase* Inc(TInt aCount);	// does mb()
   448 	NThreadBase* DoReset();	// does mb()
   449 public:
   450 	/** If >=0 the semaphore count
   451 		If <0, (thread>>2)|0x80000000
   452 		@internalComponent
   453 	*/
   454 	TInt iCount;
   455 
   456 	/** The thread allowed to wait on the semaphore
   457 		@internalComponent
   458 	*/
   459 	NThreadBase* iOwningThread;	
   460 	};
   461 
   462 /** Create a fast semaphore
   463 
   464 	@publishedPartner
   465 	@prototype
   466 */
   467 inline NFastSemaphore::NFastSemaphore()
   468 	: iCount(0), iOwningThread(NULL)
   469 	{}
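/* Usage sketch (editorial addition): a wait/signal pairing using the NKern::FSWait()
   and NKern::FSSignal() wrappers declared later in this header. Only the owning
   thread may wait; the semaphore object and function names are illustrative.
@code
	NFastSemaphore RequestSem;

	void InitOnOwningThread()
		{
		RequestSem.SetOwner(NKern::CurrentThread());	// only this thread may wait on it
		}

	void WaitForWork()
		{
		NKern::FSWait(&RequestSem);		// blocks the owning thread until signalled
		}

	void WorkAvailable()
		{
		NKern::FSSignal(&RequestSem);	// called from another thread to wake the owner
		}
@endcode
*/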
   470 
   471 /** Nanokernel fast mutex
   472 
   473 	A light-weight priority-inheritance mutex that can be used if the following
   474 	conditions apply:
   475 	
   476 	- Threads that hold the mutex never block.
   477 	- The mutex is never acquired in a nested fashion
   478 	
   479 	If either of these conditions is not met, a DMutex object is more appropriate.
   480 	
   481 	@publishedPartner
   482 	@prototype
   483 */
   484 class NFastMutex
   485 	{
   486 public:
   487 	IMPORT_C NFastMutex();
   488 	IMPORT_C void Wait();
   489 	IMPORT_C void Signal();
   490 	IMPORT_C TBool HeldByCurrentThread();
   491 private:
   492 	void DoWaitL();
   493 	void DoSignalL();
   494 
   495 	friend class NKern;
   496 public:
   497 	/** @internalComponent
   498 
   499 	If mutex is free and no-one is waiting, iHoldingThread=0
   500 	If mutex is held and no-one is waiting, iHoldingThread points to holding thread
   501 	If mutex is free but threads are waiting, iHoldingThread=1
   502 	If mutex is held and threads are waiting, iHoldingThread points to holding thread but with bit 0 set
   503 	*/
   504 	NThreadBase* iHoldingThread;
   505 
   506 	TUint32 i_NFastMutex_Pad1;	/**< @internalComponent */
   507 
   508 	/** @internalComponent
   509 
   510 	Spin lock to protect mutex
   511 	*/
   512 	TSpinLock iMutexLock;
   513 
   514 	/** @internalComponent
   515 
   516 	List of NThreads which are waiting for the mutex. The threads are linked via
   517 	their iWaitLink members.
   518 	*/
   519 	TPriList<NThreadBase, KNumPriorities> iWaitQ;
   520 	};
   521 
   522 __ASSERT_COMPILE(!(_FOFF(NFastMutex,iMutexLock)&7));
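/* Usage sketch (editorial addition): a short critical section under a fast mutex,
   using the NKern::FMWait()/FMSignal() wrappers declared later in this header.
   The mutex object and the work done while holding it are illustrative.
@code
	NFastMutex DataMutex;

	void UpdateShared()
		{
		NKern::FMWait(&DataMutex);		// acquire; priority inheritance applies
		__NK_ASSERT_DEBUG(DataMutex.HeldByCurrentThread());
		// ... short update that never blocks ...
		NKern::FMSignal(&DataMutex);	// release
		}
@endcode
*/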
   523 
   524 
   525 /**
   526 @publishedPartner
   527 @prototype
   528 
   529 The type of the callback function used by the nanokernel timer. 
   530 
   531 @see NTimer
   532 */
   533 typedef NEventFn NTimerFn;
   534 
   535 
   536 
   537 
   538 /**
   539 @publishedPartner
   540 @prototype
   541 
   542 A basic relative timer provided by the nanokernel.
   543 
   544 It can generate either a one-shot interrupt or periodic interrupts.
   545 
    546 A timeout handler is called when the timer expires, either:
    547 - from the timer ISR, if the timer is queued via OneShot(TInt aTime) or OneShot(TInt aTime, EFalse), or
    548 - from the nanokernel timer DFC thread, if the timer is queued via OneShot(TInt aTime, ETrue), or
    549 - from the DFC thread to which the supplied DFC belongs, if the timer is queued via OneShot(TInt aTime, TDfc& aDfc).
    550 The callback mechanism cannot be changed during the lifetime of a timer.
   551 
   552 These timer objects may be manipulated from any context.
   553 The timers are driven from a periodic system tick interrupt,
    554 usually with a 1ms period.
   555 
   556 @see NTimerFn
   557 */
   558 class NTimerQ;
   559 class NTimer : public NEventHandler
   560 	{
   561 public:
   562 	/**
   563 	Default constructor.
   564 	*/
   565 	inline NTimer()
   566 		{
   567 		iHType = EEventHandlerNTimer;
   568 		i8888.iHState1 = EIdle;
   569 		}
   570 	/**
   571 	Constructor taking a callback function and a pointer to be passed
   572 	to the callback function.
   573 	
   574 	@param aFunction The callback function.
   575 	@param aPtr      A pointer to be passed to the callback function 
   576 	                 when called.
   577 	*/
   578 	inline NTimer(NTimerFn aFunction, TAny* aPtr)
   579 		{
   580 		iPtr = aPtr;
   581 		iFn = aFunction;
   582 		iHType = EEventHandlerNTimer;
   583 		i8888.iHState1 = EIdle;
   584 		}
   585 	IMPORT_C NTimer(NSchedulable* aTied, NTimerFn aFunction, TAny* aPtr);
   586 	IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TInt aPriority);					// create DFC, queue to be set later
   587 	IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority);	// create DFC
   588 	IMPORT_C void SetDfcQ(TDfcQue* aDfcQ);
   589 	IMPORT_C ~NTimer();
   590 	IMPORT_C TInt SetTied(NSchedulable* aTied);
   591 	IMPORT_C TInt OneShot(TInt aTime);
   592 	IMPORT_C TInt OneShot(TInt aTime, TBool aDfc);
   593 	IMPORT_C TInt OneShot(TInt aTime, TDfc& aDfc);
   594 	IMPORT_C TInt Again(TInt aTime);
   595 	IMPORT_C TBool Cancel();
   596 	IMPORT_C TBool IsPending();
   597 private:
   598 	enum { ECancelDestroy=1 };
   599 private:
   600 	inline TBool IsNormal()
   601 		{ return iHType==EEventHandlerNTimer; }
   602 	inline TBool IsMutating()
   603 		{ return iHType<KNumDfcPriorities; }
   604 	inline TBool IsValid()
   605 		{ return iHType<KNumDfcPriorities || iHType==EEventHandlerNTimer; }
   606 	void AddAsDFC();
   607 	TUint DoCancel(TUint aFlags);
   608 	void DoCancel0(TUint aState);
   609 	TBool DoCancelMutating(TUint aFlags);
   610 public:
   611 /**
   612 	@internalComponent
   613 */
   614 	enum TState
   615 		{
   616 		EIdle=0,			// not queued
   617 							// 1 skipped so as not to clash with DFC states
   618 		ETransferring=2,	// being transferred from holding to ordered queue
   619 		EHolding=3,			// on holding queue
   620 		EOrdered=4,			// on ordered queue
   621 		ECritical=5,		// on ordered queue and in use by queue walk routine
   622 		EFinal=6,			// on final queue
   623 		EEventQ=32,			// 32+n = on event queue of CPU n (for tied timers)
   624 		};
   625 public:
   626 	TUint32 iTriggerTime;	/**< @internalComponent */
   627 	TUint32	iNTimerSpare1;	/**< @internalComponent */
   628 
   629 	/** This field is available for use by the timer client provided that
   630 		the timer isn't a mutating-into-DFC timer.
   631 		@internalTechnology */
   632 //	TUint8 iUserFlags;									// i8888.iHState0
   633 //	TUint8 iState;			/**< @internalComponent */	// i8888.iHState1
   634 //	TUint8 iCompleteInDfc;	/**< @internalComponent */	// i8888.iHState2
   635 
   636 
   637 	friend class NTimerQ;
   638 	friend class NSchedulable;
   639 	};
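/* Usage sketch (editorial addition): a one-shot timeout guard. The callback, the
   global timer object and the 10-tick period are invented for illustration.
@code
	void TimeoutExpired(TAny* aPtr);		// hypothetical NTimerFn callback

	NTimer TheTimeout(&TimeoutExpired, 0);	// function + argument form of the timer

	TInt StartGuard()
		{
		// Expire after 10 nanokernel ticks; with this overload the handler runs in
		// the timer ISR. OneShot(10, ETrue) would run it from the timer DFC thread.
		return TheTimeout.OneShot(10);
		}

	void StopGuard()
		{
		TheTimeout.Cancel();				// cancel if the guarded operation completed first
		}
@endcode
*/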
   640 
   641 /**
   642 @internalTechnology
   643 */
   644 #define	i_NTimer_iUserFlags	i8888.iHState0
   645 
   646 /**
   647 @internalComponent
   648 */
   649 #define	i_NTimer_iState		i8888.iHState1
   650 
   651 /**
   652 	@publishedPartner
   653 	@released
   654 */
   655 typedef void (*NThreadFunction)(TAny*);
   656 
   657 /**
   658 	@publishedPartner
   659 	@released
   660 */
   661 typedef TDfc* (*NThreadExitHandler)(NThread*);
   662 
   663 /**
   664 	@publishedPartner
   665 	@prototype
   666 */
   667 typedef void (*NThreadStateHandler)(NThread*,TInt,TInt);
   668 
   669 /**
   670 	@publishedPartner
   671 	@prototype
   672 */
   673 typedef void (*NThreadExceptionHandler)(TAny*,NThread*);
   674 
   675 /**
   676 	@publishedPartner
   677 	@prototype
   678 */
   679 typedef void (*NThreadTimeoutHandler)(NThread*,TInt);
   680 
   681 /**
   682 	@publishedPartner
   683 	@prototype
   684 */
   685 struct SNThreadHandlers
   686 	{
   687 	NThreadExitHandler iExitHandler;
   688 	NThreadStateHandler iStateHandler;
   689 	NThreadExceptionHandler iExceptionHandler;
   690 	NThreadTimeoutHandler iTimeoutHandler;
   691 	};
   692 
   693 /** @internalComponent */
   694 extern void NThread_Default_State_Handler(NThread*, TInt, TInt);
   695 
   696 /** @internalComponent */
   697 extern void NThread_Default_Exception_Handler(TAny*, NThread*);
   698 
   699 /** @internalComponent */
   700 #define NTHREAD_DEFAULT_EXIT_HANDLER		((NThreadExitHandler)0)
   701 
   702 /** @internalComponent */
   703 #define	NTHREAD_DEFAULT_STATE_HANDLER		(&NThread_Default_State_Handler)
   704 
   705 /** @internalComponent */
   706 #define	NTHREAD_DEFAULT_EXCEPTION_HANDLER	(&NThread_Default_Exception_Handler)
   707 
   708 /** @internalComponent */
   709 #define	NTHREAD_DEFAULT_TIMEOUT_HANDLER		((NThreadTimeoutHandler)0)
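/* Usage sketch (editorial addition): an SNThreadHandlers table populated entirely
   with the defaults above; the null entries are taken to mean "no special handling".
@code
	const SNThreadHandlers MyThreadHandlers =
		{
		NTHREAD_DEFAULT_EXIT_HANDLER,		// no exit handler
		NTHREAD_DEFAULT_STATE_HANDLER,		// nanokernel's default state handler
		NTHREAD_DEFAULT_EXCEPTION_HANDLER,	// nanokernel's default exception handler
		NTHREAD_DEFAULT_TIMEOUT_HANDLER		// no timeout handler
		};
@endcode
*/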
   710 
   711 
   712 /**
   713 	@publishedPartner
   714 	@prototype
   715 */
   716 struct SFastExecTable
   717 	{
   718 	TInt iFastExecCount;			// includes implicit function#0
   719 	TLinAddr iFunction[1];			// first entry is for call number 1
   720 	};
   721 
   722 /**
   723 	@publishedPartner
   724 	@prototype
   725 */
   726 const TUint32 KExecFlagClaim=0x80000000;		// claim system lock
   727 
   728 /**
   729 	@publishedPartner
   730 	@prototype
   731 */
   732 const TUint32 KExecFlagRelease=0x40000000;		// release system lock
   733 
   734 /**
   735 	@publishedPartner
   736 	@prototype
   737 */
   738 const TUint32 KExecFlagPreprocess=0x20000000;	// preprocess
   739 
   740 /**
   741 	@publishedPartner
   742 	@prototype
   743 */
   744 const TUint32 KExecFlagExtraArgMask=0x1C000000;	// 3 bits indicating additional arguments
   745 
   746 /**
   747 	@publishedPartner
   748 	@prototype
   749 */
   750 const TUint32 KExecFlagExtraArgs2=0x04000000;	// 2 additional arguments
   751 
   752 /**
   753 	@publishedPartner
   754 	@prototype
   755 */
   756 const TUint32 KExecFlagExtraArgs3=0x08000000;	// 3 additional arguments
   757 
   758 /**
   759 	@publishedPartner
   760 	@prototype
   761 */
   762 const TUint32 KExecFlagExtraArgs4=0x0C000000;	// 4 additional arguments
   763 
   764 /**
   765 	@publishedPartner
   766 	@prototype
   767 */
   768 const TUint32 KExecFlagExtraArgs5=0x10000000;	// 5 additional arguments
   769 
   770 /**
   771 	@publishedPartner
   772 	@prototype
   773 */
   774 const TUint32 KExecFlagExtraArgs6=0x14000000;	// 6 additional arguments
   775 
   776 /**
   777 	@publishedPartner
   778 	@prototype
   779 */
   780 const TUint32 KExecFlagExtraArgs7=0x18000000;	// 7 additional arguments
   781 
   782 /**
   783 	@publishedPartner
   784 	@prototype
   785 */
   786 const TUint32 KExecFlagExtraArgs8=0x1C000000;	// 8 additional arguments
   787 
   788 
   789 /**
   790 	@publishedPartner
   791 	@prototype
   792 */
   793 struct SSlowExecEntry
   794 	{
   795 	TUint32 iFlags;					// information about call
   796 	TLinAddr iFunction;				// address of function to be called
   797 	};
   798 
   799 
   800 /**
   801 	@publishedPartner
   802 	@prototype
   803 */
   804 struct SSlowExecTable
   805 	{
   806 	TInt iSlowExecCount;
   807 	TLinAddr iInvalidExecHandler;	// used if call number invalid
   808 	TLinAddr iPreprocessHandler;	// used for handle lookups
   809 	SSlowExecEntry iEntries[1];		// first entry is for call number 0
   810 	};
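/* Usage sketch (editorial addition, not a definitive layout): a static slow-exec
   table widened to two entries by mirroring the declaration above. The handler
   functions and the flag choices are invented for illustration.
@code
	TInt InvalidExecHandler(TInt aCall);	// hypothetical handlers
	TInt PreprocessHandler();
	TInt ExecCall0(TInt aArg);
	TInt ExecCall1(TInt aArg);

	struct SMySlowExecTable					// same layout, two entries
		{
		TInt iSlowExecCount;
		TLinAddr iInvalidExecHandler;
		TLinAddr iPreprocessHandler;
		SSlowExecEntry iEntries[2];
		};

	const SMySlowExecTable MySlowExecTable =
		{
		2,									// two slow exec calls
		(TLinAddr)&InvalidExecHandler,		// used if the call number is invalid
		(TLinAddr)&PreprocessHandler,		// used for handle lookups
			{
			{ 0,								(TLinAddr)&ExecCall0 },	// call 0: no special flags
			{ KExecFlagClaim|KExecFlagRelease,	(TLinAddr)&ExecCall1 }	// call 1: claim, then release, the system lock
			}
		};
@endcode
*/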
   811 
   812 // Thread iAttributes Constants
   813 const TUint8 KThreadAttImplicitSystemLock=1;	/**< @internalComponent */
   814 const TUint8 KThreadAttAddressSpace=2;			/**< @internalComponent */
   815 const TUint8 KThreadAttLoggable=4;				/**< @internalComponent */
   816 
   817 
   818 // Thread CPU
   819 const TUint32 KCpuAffinityAny=0xffffffffu;		/**< @internalComponent */
   820 
   821 /** Information needed for creating a nanothread.
   822 
   823 	@publishedPartner
   824 	@prototype
   825 */
   826 struct SNThreadCreateInfo
   827 	{
   828 	NThreadFunction iFunction;
   829 	TAny* iStackBase;
   830 	TInt iStackSize;
   831 	TInt iPriority;
   832 	TInt iTimeslice;
   833 	TUint8 iAttributes;
   834 	TUint32 iCpuAffinity;
   835 	const SNThreadHandlers* iHandlers;
   836 	const SFastExecTable* iFastExecTable;
   837 	const SSlowExecTable* iSlowExecTable;
   838 	const TUint32* iParameterBlock;
   839 	TInt iParameterBlockSize;		// if zero, iParameterBlock _is_ the initial data
   840 									// otherwise it points to n bytes of initial data
   841 	NThreadGroup* iGroup;			// NULL for lone thread
   842 	};
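/* Usage sketch (editorial addition): filling in SNThreadCreateInfo for a lone thread
   and passing it to NKern::ThreadCreate(), declared later in this header. The
   priority, timeslice and handler table are illustrative choices.
@code
	void MyThreadEntry(TAny* aArg);			// hypothetical NThreadFunction

	TInt CreateWorker(NThread* aThread, TAny* aStack, TInt aStackSize)
		{
		SNThreadCreateInfo info;
		info.iFunction = &MyThreadEntry;
		info.iStackBase = aStack;
		info.iStackSize = aStackSize;
		info.iPriority = 16;					// mid-range of the 64 priorities
		info.iTimeslice = NKern::TimesliceTicks(20000);	// 20ms expressed in ticks
		info.iAttributes = 0;
		info.iCpuAffinity = KCpuAffinityAny;	// let the scheduler choose a CPU
		info.iHandlers = &MyThreadHandlers;		// e.g. the SNThreadHandlers table sketched earlier
		info.iFastExecTable = 0;
		info.iSlowExecTable = 0;
		info.iParameterBlock = 0;
		info.iParameterBlockSize = 0;			// 0: iParameterBlock itself is the initial data
		info.iGroup = 0;						// lone thread
		return NKern::ThreadCreate(aThread, info);
		}
@endcode
*/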
   843 
   844 /** Information needed for creating a nanothread group.
   845 
   846 	@publishedPartner
   847 	@prototype
   848 */
   849 struct SNThreadGroupCreateInfo
   850 	{
   851 	TUint32 iCpuAffinity;
   852 	};
   853 
   854 /**	Constant for use with NKern:: functions which release a fast mutex as well
   855 	as performing some other operations.
   856 
   857 	@publishedPartner
   858 	@released
   859 */
   860 #define	SYSTEM_LOCK		(NFastMutex*)0
   861 
   862 
   863 /** Idle handler function
   864 	Pointer to a function which is called whenever a CPU goes idle
   865 
   866 	@param	aPtr	The iPtr stored in the SCpuIdleHandler structure
    867 	@param	aStage	If positive, the number of processors still active
    868 					If zero, all processors are now idle
    869 					If -1, postamble processing is required after waking up
   870 
   871 	@publishedPartner
   872 	@prototype
   873 */
   874 typedef void (*TCpuIdleHandlerFn)(TAny* aPtr, TInt aStage);
   875 
   876 /** Idle handler structure
   877 
   878 	@publishedPartner
   879 	@prototype
   880 */
   881 struct SCpuIdleHandler
   882 	{
   883 	TCpuIdleHandlerFn	iHandler;
   884 	TAny*				iPtr;
   885 	volatile TBool		iPostambleRequired;
   886 	};
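/* Usage sketch (editorial addition): a handler following the aStage contract
   described above. The assumption that installation is done by filling in the
   structure returned by NKern::CpuIdleHandler() is the editor's, not stated here.
@code
	void MyIdleHandler(TAny* aPtr, TInt aStage)
		{
		if (aStage == 0)
			{
			// all CPUs are now idle - a deeper power state could be entered
			}
		else if (aStage > 0)
			{
			// aStage CPUs are still active - only a shallow per-CPU idle is appropriate
			}
		else	// aStage == -1
			{
			// postamble processing after waking up
			}
		}

	void InstallIdleHandler(TAny* aPtr)
		{
		SCpuIdleHandler* h = NKern::CpuIdleHandler();	// assumed registration point
		h->iPtr = aPtr;
		h->iHandler = &MyIdleHandler;
		}
@endcode
*/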
   887 
   888 
   889 /**
   890 @internalComponent
   891 */
   892 enum TUserModeCallbackReason
   893 	{
   894 	EUserModeCallbackRun,
   895 	EUserModeCallbackCancel,
   896 	};
   897 
   898 
   899 /**
   900 A callback function executed when a thread returns to user mode.
   901 
   902 @internalComponent
   903 */
   904 typedef void (*TUserModeCallbackFunc)(TAny* aThisPtr, TUserModeCallbackReason aReasonCode);
   905 
   906 
   907 /**
   908 An object representing a queued callback to be executed when a thread returns to user mode.
   909 
   910 @internalComponent
   911 */
   912 class TUserModeCallback
   913 	{
   914 public:
   915 	TUserModeCallback(TUserModeCallbackFunc);
   916 	~TUserModeCallback();
   917 
   918 public:
   919 	TUserModeCallback* volatile iNext;
   920 	TUserModeCallbackFunc iFunc;
   921 	};
   922 
   923 TUserModeCallback* const KUserModeCallbackUnqueued = ((TUserModeCallback*)1);
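/* Usage sketch (editorial addition): a callback distinguishing the two reason codes.
   That aThisPtr is the TUserModeCallback object itself is inferred from the
   parameter name; queuing would go through NKern::QueueUserModeCallback() below.
@code
	void MyUserModeCallbackFn(TAny* aThisPtr, TUserModeCallbackReason aReason)
		{
		TUserModeCallback* cb = (TUserModeCallback*)aThisPtr;	// presumed identity of aThisPtr
		if (aReason == EUserModeCallbackRun)
			{
			// the thread is about to return to user mode - do the deferred work
			}
		// for both Run and Cancel, release anything owned on behalf of 'cb'
		(void)cb;
		}

	TUserModeCallback MyCallback(&MyUserModeCallbackFn);
@endcode
*/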
   924 
   925 
    926 /** Main function for an AP (application processor)
   927 
   928 @internalTechnology
   929 */
   930 struct SAPBootInfo;
   931 typedef void (*TAPBootFunc)(volatile SAPBootInfo*);
   932 
   933 
   934 /** Information needed to boot an AP
   935 
   936 @internalTechnology
   937 */
   938 struct SAPBootInfo
   939 	{
   940 	TUint32				iCpu;				// Hardware CPU ID
   941 	TUint32				iInitStackSize;		// Size of initial stack
   942 	TLinAddr			iInitStackBase;		// Base of initial stack
   943 	TAPBootFunc			iMain;				// Address of initial function to call
   944 	TAny*				iArgs[4];
   945 	};
   946 
   947 typedef void (*NIsr)(TAny*);
   948 
   949 /** Nanokernel functions
   950 
   951 	@publishedPartner
   952 	@prototype
   953 */
   954 class NKern
   955 	{
   956 public:
   957 	/** Bitmask values used when blocking a nanothread.
   958 		@see NKern::Block()
   959 	 */
   960 	enum TBlockMode 
   961 		{
   962 		EEnterCS=1,		/**< Enter thread critical section before blocking */
   963 		ERelease=2,		/**< Release specified fast mutex before blocking */
   964 		EClaim=4,		/**< Re-acquire specified fast mutex when unblocked */
   965 		EObstruct=8,	/**< Signifies obstruction of thread rather than lack of work to do */
   966 		};
   967 
   968 	/** Values that specify the context of the processor.
   969 		@see NKern::CurrentContext()
   970 	*/
   971 	enum TContext
   972 		{
   973 		EThread=0,			/**< The processor is in a thread context*/
   974 		EIDFC=1,			/**< The processor is in an IDFC context*/
   975 		EInterrupt=2,		/**< The processor is in an interrupt context*/
    976 		EEscaped=KMaxTInt	/**< Not a valid processor context on target hardware */
   977 		};
   978 
   979 public:
   980 	// Threads
   981 	IMPORT_C static TInt ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo);
   982 	IMPORT_C static TBool ThreadSuspend(NThread* aThread, TInt aCount);
   983 	IMPORT_C static TBool ThreadResume(NThread* aThread);
   984 	IMPORT_C static TBool ThreadResume(NThread* aThread, NFastMutex* aMutex);
   985 	IMPORT_C static TBool ThreadForceResume(NThread* aThread);
   986 	IMPORT_C static TBool ThreadForceResume(NThread* aThread, NFastMutex* aMutex);
   987 	IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue);
   988 	IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex);
   989 	IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority);
   990 	IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex);
   991 	IMPORT_C static void ThreadRequestSignal(NThread* aThread);
   992 	IMPORT_C static void ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex);
   993 	IMPORT_C static void ThreadRequestSignal(NThread* aThread, TInt aCount);
   994 	IMPORT_C static void ThreadKill(NThread* aThread);
   995 	IMPORT_C static void ThreadKill(NThread* aThread, NFastMutex* aMutex);
   996 	IMPORT_C static void ThreadEnterCS();
   997 	IMPORT_C static void ThreadLeaveCS();
   998 	static NThread* _ThreadEnterCS();		/**< @internalComponent */
   999 	static void _ThreadLeaveCS();			/**< @internalComponent */
  1000 	IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex);
  1001 	IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode);
  1002 	IMPORT_C static void NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj);
  1003 	IMPORT_C static void ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
  1004 	IMPORT_C static void ThreadSetUserContext(NThread* aThread, TAny* aContext);
  1005 	IMPORT_C static void ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
  1006 	static void ThreadModifyUsp(NThread* aThread, TLinAddr aUsp);
  1007 	IMPORT_C static TInt FreezeCpu();													/**< @internalComponent */
  1008 	IMPORT_C static void EndFreezeCpu(TInt aCookie);									/**< @internalComponent */
  1009 	IMPORT_C static TUint32 ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity);	/**< @internalComponent */
  1010 	IMPORT_C static void ThreadSetTimeslice(NThread* aThread, TInt aTimeslice);			/**< @internalComponent */
  1011 	IMPORT_C static TUint64 ThreadCpuTime(NThread* aThread);							/**< @internalComponent */
  1012 	IMPORT_C static TUint32 CpuTimeMeasFreq();											/**< @internalComponent */
  1013 	static TInt QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback);	/**< @internalComponent */
  1014 	static void MoveUserModeCallbacks(NThreadBase* aSrcThread, NThreadBase* aDestThread);	/**< @internalComponent */
  1015 	static void CancelUserModeCallbacks();												/**< @internalComponent */
  1016 
  1017 	// Thread Groups
  1018 	IMPORT_C static TInt GroupCreate(NThreadGroup* aGroup, SNThreadGroupCreateInfo& aInfo);
  1019 	IMPORT_C static void GroupDestroy(NThreadGroup* aGroup);
  1020 	IMPORT_C static NThreadGroup* CurrentGroup();
  1021 	IMPORT_C static NThreadGroup* LeaveGroup();
  1022 	IMPORT_C static void JoinGroup(NThreadGroup* aGroup);
  1023 	IMPORT_C static TUint32 GroupSetCpuAffinity(NThreadGroup* aGroup, TUint32 aAffinity);
  1024 
  1025 	// Fast semaphores
  1026 	IMPORT_C static void FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread);
  1027 	IMPORT_C static void FSWait(NFastSemaphore* aSem);
  1028 	IMPORT_C static void FSSignal(NFastSemaphore* aSem);
  1029 	IMPORT_C static void FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex);
  1030 	IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount);
  1031 	IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex);
  1032 
  1033 	// Fast mutexes
  1034 	IMPORT_C static void FMWait(NFastMutex* aMutex);
  1035 	IMPORT_C static void FMSignal(NFastMutex* aMutex);
  1036 	IMPORT_C static TBool FMFlash(NFastMutex* aMutex);
  1037 
  1038 	// Scheduler
  1039 	IMPORT_C static void Lock();
  1040 	IMPORT_C static NThread* LockC();
  1041 	IMPORT_C static void Unlock();
  1042 	IMPORT_C static TInt PreemptionPoint();
  1043 
  1044 	// Interrupts
  1045 	IMPORT_C static TInt DisableAllInterrupts();
  1046 	IMPORT_C static TInt DisableInterrupts(TInt aLevel);
  1047 	IMPORT_C static void RestoreInterrupts(TInt aRestoreData);
  1048 	IMPORT_C static void EnableAllInterrupts();
  1049 
  1050 	// Read-modify-write
  1051 	inline static TInt LockedInc(TInt& aCount)
  1052 		{ return __e32_atomic_add_ord32(&aCount,1); }
  1053 	inline static TInt LockedDec(TInt& aCount)
  1054 		{ return __e32_atomic_add_ord32(&aCount,0xffffffff); }
  1055 	inline static TInt LockedAdd(TInt& aDest, TInt aSrc)
  1056 		{ return __e32_atomic_add_ord32(&aDest,aSrc); }
  1057 	inline static TInt64 LockedInc(TInt64& aCount)
  1058 		{ return __e32_atomic_add_ord64(&aCount,1); }
  1059 	inline static TInt64 LockedDec(TInt64& aCount)
  1060 		{ return __e32_atomic_add_ord64(&aCount,TUint64(TInt64(-1))); }
  1061 	inline static TInt64 LockedAdd(TInt64& aDest, TInt64 aSrc)		/**< @internalComponent */
  1062 		{ return __e32_atomic_add_ord64(&aDest,aSrc); }
  1063 	inline static TUint32 LockedSetClear(TUint32& aDest, TUint32 aClearMask, TUint32 aSetMask)
  1064 		{ return __e32_atomic_axo_ord32(&aDest,~(aClearMask|aSetMask),aSetMask); }
  1065 	inline static TUint16 LockedSetClear16(TUint16& aDest, TUint16 aClearMask, TUint16 aSetMask)	/**< @internalComponent */
  1066 		{ return __e32_atomic_axo_ord16(&aDest,TUint16(~(aClearMask|aSetMask)),aSetMask); }
  1067 	inline static TUint8 LockedSetClear8(TUint8& aDest, TUint8 aClearMask, TUint8 aSetMask)
  1068 		{ return __e32_atomic_axo_ord8(&aDest,TUint8(~(aClearMask|aSetMask)),aSetMask); }
  1069 	inline static TInt SafeInc(TInt& aCount)
  1070 		{ return __e32_atomic_tas_ord32(&aCount,1,1,0); }
  1071 	inline static TInt SafeDec(TInt& aCount)
  1072 		{ return __e32_atomic_tas_ord32(&aCount,1,-1,0); }
  1073 	inline static TInt AddIfGe(TInt& aCount, TInt aLimit, TInt aInc)	/**< @internalComponent */
  1074 		{ return __e32_atomic_tas_ord32(&aCount,aLimit,aInc,0); }
  1075 	inline static TInt AddIfLt(TInt& aCount, TInt aLimit, TInt aInc)	/**< @internalComponent */
  1076 		{ return __e32_atomic_tas_ord32(&aCount,aLimit,0,aInc); }
  1077 	inline static TAny* SafeSwap(TAny* aNewValue, TAny*& aPtr)
  1078 		{ return __e32_atomic_swp_ord_ptr(&aPtr, aNewValue); }
  1079 	inline static TUint8 SafeSwap8(TUint8 aNewValue, TUint8& aPtr)
  1080 		{ return __e32_atomic_swp_ord8(&aPtr, aNewValue); }
  1081 	inline static TUint16 SafeSwap16(TUint16 aNewValue, TUint16& aPtr)						/**< @internalComponent */
  1082 		{ return __e32_atomic_swp_ord16(&aPtr, aNewValue); }
  1083 	inline static TBool CompareAndSwap(TAny*& aPtr, TAny* aExpected, TAny* aNew)			/**< @internalComponent */
  1084 		{ return __e32_atomic_cas_ord_ptr(&aPtr, &aExpected, aNew); }
  1085 	inline static TBool CompareAndSwap8(TUint8& aPtr, TUint8 aExpected, TUint8 aNew)		/**< @internalComponent */
  1086 		{ return __e32_atomic_cas_ord8(&aPtr, (TUint8*)&aExpected, (TUint8)aNew); }
  1087 	inline static TBool CompareAndSwap16(TUint16& aPtr, TUint16 aExpected, TUint16 aNew)	/**< @internalComponent */
  1088 		{ return __e32_atomic_cas_ord16(&aPtr, (TUint16*)&aExpected, (TUint16)aNew); }
  1089 	inline static TUint32 SafeSwap(TUint32 aNewValue, TUint32& aPtr)						/**< @internalComponent */
  1090 		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
  1091 	inline static TUint SafeSwap(TUint aNewValue, TUint& aPtr)								/**< @internalComponent */
  1092 		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
  1093 	inline static TInt SafeSwap(TInt aNewValue, TInt& aPtr)									/**< @internalComponent */
  1094 		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
  1095 	inline static TBool CompareAndSwap(TUint32& aPtr, TUint32 aExpected, TUint32 aNew)		/**< @internalComponent */
  1096 		{ return __e32_atomic_cas_ord32(&aPtr, &aExpected, aNew); }
  1097 	inline static TBool CompareAndSwap(TUint& aPtr, TUint aExpected, TUint aNew)			/**< @internalComponent */
  1098 		{ return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
  1099 	inline static TBool CompareAndSwap(TInt& aPtr, TInt aExpected, TInt aNew)				/**< @internalComponent */
  1100 		{ return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
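	/* Usage sketch (editorial addition): the CompareAndSwap() overloads above support
	   lock-free read-modify-write loops; aExpected is passed by value, so the caller
	   re-reads the current value on each retry. (For this particular bit-set,
	   LockedSetClear() would achieve the same effect directly.)
	@code
		void SetFlagBit(TUint32& aFlags, TUint32 aBit)
			{
			TUint32 old;
			do	{
				old = aFlags;		// snapshot the current value
				} while (!NKern::CompareAndSwap(aFlags, old, old|aBit));	// retry if it changed
			}
	@endcode
	*/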
  1101 
  1102 
  1103 	// Miscellaneous
  1104 	IMPORT_C static NThread* CurrentThread();
  1105 	IMPORT_C static TInt CurrentCpu();										/**< @internalComponent */
  1106 	IMPORT_C static TInt NumberOfCpus();									/**< @internalComponent */
  1107 	IMPORT_C static void LockSystem();
  1108 	IMPORT_C static void UnlockSystem();
  1109 	IMPORT_C static TBool FlashSystem();
  1110 	IMPORT_C static void WaitForAnyRequest();
  1111 	IMPORT_C static void Sleep(TUint32 aTime);
  1112 	IMPORT_C static void Exit();
  1113 	IMPORT_C static void DeferredExit();
  1114 	IMPORT_C static void YieldTimeslice();									/**< @internalComponent */
  1115 	IMPORT_C static void RotateReadyList(TInt aPriority);					
  1116 	IMPORT_C static void RotateReadyList(TInt aPriority, TInt aCpu);		/**< @internalTechnology */
  1117 	IMPORT_C static void RecordIntLatency(TInt aLatency, TInt aIntMask);	/**< @internalTechnology */
  1118 	IMPORT_C static void RecordThreadLatency(TInt aLatency);				/**< @internalTechnology */
  1119 	IMPORT_C static TUint32 TickCount();
  1120 	IMPORT_C static TInt TickPeriod();
  1121 	IMPORT_C static TInt TimerTicks(TInt aMilliseconds);
  1122 	IMPORT_C static TInt TimesliceTicks(TUint32 aMicroseconds);				/**< @internalTechnology */
  1123 	IMPORT_C static TInt CurrentContext();
  1124 	IMPORT_C static TUint32 FastCounter();
  1125 	IMPORT_C static TInt FastCounterFrequency();
  1126 	IMPORT_C static TUint64 Timestamp();
  1127 	IMPORT_C static TUint32 TimestampFrequency();
  1128 	static void Init0(TAny* aVariantData);
  1129 	static void Init(NThread* aThread, SNThreadCreateInfo& aInfo);
  1130 	static TInt BootAP(volatile SAPBootInfo* aInfo);
  1131 	IMPORT_C static TBool KernelLocked(TInt aCount=0);						/**< @internalTechnology */
  1132 	IMPORT_C static NFastMutex* HeldFastMutex();							/**< @internalTechnology */
  1133 	static void Idle();	
  1134 	IMPORT_C static SCpuIdleHandler* CpuIdleHandler();						/**< @internalTechnology */
  1135 	static void NotifyCrash(const TAny* a0, TInt a1);						/**< @internalTechnology */
  1136 	IMPORT_C static TBool Crashed();
  1137 	static TUint32 IdleGenerationCount();
  1138 
  1139 	// Debugger support
  1140 	typedef void (*TRescheduleCallback)(NThread*);
  1141 	IMPORT_C static void SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd);
  1142 	IMPORT_C static void InsertSchedulerHooks();
  1143 	IMPORT_C static void RemoveSchedulerHooks();
  1144 	IMPORT_C static void SetRescheduleCallback(TRescheduleCallback aCallback);
  1145 
  1146 	// Interrupts
  1147 	enum TIrqInitFlags
  1148 		{
  1149 		EIrqInit_FallingEdge=0,
  1150 		EIrqInit_RisingEdge=2,
  1151 		EIrqInit_LevelLow=1,
  1152 		EIrqInit_LevelHigh=3,
  1153 		EIrqInit_Shared=0x10,
  1154 		EIrqInit_Count=0x20,
  1155 		};
  1156 
  1157 	enum TIrqBindFlags
  1158 		{
  1159 		EIrqBind_Raw=1,
  1160 		EIrqBind_Count=2,
  1161 		EIrqBind_Exclusive=4,
  1162 		EIrqBind_Tied=8
  1163 		};
  1164 
  1165 	enum TIrqIdBits
  1166 		{
  1167 		EIrqIndexMask = 0x0000ffff,	// bottom 16 bits is IRQ number if top 16 bits all zero
  1168 									// otherwise is IRQ handler index
  1169 		EIrqCookieMask = 0x7fff0000,
  1170 		EIrqCookieShift = 16
  1171 		};
  1172 
  1173 	static void InterruptInit0();
  1174 	IMPORT_C static TInt InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt=0);
  1175 	IMPORT_C static TInt InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied);
  1176 	IMPORT_C static TInt InterruptUnbind(TInt aId);
  1177 	IMPORT_C static TInt InterruptEnable(TInt aId);
  1178 	IMPORT_C static TInt InterruptDisable(TInt aId);
  1179 	IMPORT_C static TInt InterruptClear(TInt aId);
  1180 	IMPORT_C static TInt InterruptSetPriority(TInt aId, TInt aPri);
  1181 	IMPORT_C static TInt InterruptSetCpuMask(TInt aId, TUint32 aMask);
  1182 	IMPORT_C static void Interrupt(TInt aIrqNo);
  1183 	};
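/* Usage sketch (editorial addition): binding and enabling an interrupt. It is assumed
   here, based on TIrqIdBits above, that InterruptBind() returns either a negative
   error code or a handle to be passed to the other Interrupt*() calls; the ISR and
   wrapper function are invented for illustration.
@code
	void MyIsr(TAny* aPtr);					// hypothetical NIsr

	TInt AttachIrq(TInt aId, TAny* aPtr)
		{
		TInt handle = NKern::InterruptBind(aId, &MyIsr, aPtr, 0, 0);	// untied, no special flags
		if (handle < 0)
			return handle;					// bind failed
		return NKern::InterruptEnable(handle);
		}
@endcode
*/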
  1184 
  1185 
  1186 /** Create a fast semaphore
  1187 
  1188 	@publishedPartner
  1189 	@prototype
  1190 */
  1191 inline NFastSemaphore::NFastSemaphore(NThreadBase* aThread)
  1192 	:	iCount(0),
  1193 		iOwningThread(aThread ? aThread : (NThreadBase*)NKern::CurrentThread())
  1194 	{
  1195 	}
  1196 
  1197 
  1198 class TGenericIPI;
  1199 
  1200 /**
  1201 @internalComponent
  1202 */
  1203 typedef void (*TGenericIPIFn)(TGenericIPI*);
  1204 
  1205 /**
  1206 @internalComponent
  1207 */
  1208 class TGenericIPI : public SDblQueLink
  1209 	{
  1210 public:
  1211 	void Queue(TGenericIPIFn aFunc, TUint32 aCpuMask);
  1212 	void QueueAll(TGenericIPIFn aFunc);
  1213 	void QueueAllOther(TGenericIPIFn aFunc);
  1214 	void WaitEntry();
  1215 	void WaitCompletion();
  1216 public:
  1217 	TGenericIPIFn			iFunc;
  1218 	volatile TUint32		iCpusIn;
  1219 	volatile TUint32		iCpusOut;
  1220 	};
  1221 
  1222 /**
  1223 @internalComponent
  1224 */
  1225 class TStopIPI : public TGenericIPI
  1226 	{
  1227 public:
  1228 	void StopCPUs();
  1229 	void ReleaseCPUs();
  1230 	static void Isr(TGenericIPI*);
  1231 public:
  1232 	volatile TInt iFlag;
  1233 	};
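/* Usage sketch (editorial addition): the pattern below, quiescing the other CPUs
   around a brief update, is inferred from the method names and is not documented
   by this header.
@code
	void PatchGlobalState()
		{
		TStopIPI ipi;
		ipi.StopCPUs();			// bring the other CPUs to a controlled spin
		// ... briefly modify state that no other CPU may observe mid-update ...
		ipi.ReleaseCPUs();		// let the other CPUs continue
		}
@endcode
*/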
  1234 
  1235 #include <ncern.h>
  1236 #endif