os/kernelhwsrv/kernel/eka/include/nkernsmp/nkern.h
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/include/nkernsmp/nkern.h	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,1236 @@
     1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32\include\nkernsmp\nkern.h
    1.18 +// 
    1.19 +// WARNING: This file contains some APIs which are internal and are subject
    1.20 +//          to change without notice. Such APIs should therefore not be used
    1.21 +//          outside the Kernel and Hardware Services package.
    1.22 +//
    1.23 +
    1.24 +#ifndef __NKERN_H__
    1.25 +#define __NKERN_H__
    1.26 +
    1.27 +#ifdef	__STANDALONE_NANOKERNEL__
    1.28 +#undef	__IN_KERNEL__
    1.29 +#define	__IN_KERNEL__
    1.30 +#endif
    1.31 +
    1.32 +#include <e32const.h>
    1.33 +#include <nklib.h>
    1.34 +#include <nk_event.h>
    1.35 +#include <dfcs.h>
    1.36 +#include <nk_trace.h>
    1.37 +#include <e32atomics.h>
    1.38 +
    1.39 +extern "C" {
    1.40 +/** @internalComponent */
    1.41 +IMPORT_C void NKFault(const char* file, TInt line);
    1.42 +/** @internalComponent */
    1.43 +void NKIdle(TInt aStage);
    1.44 +}
    1.45 +
    1.46 +/**
    1.47 +@publishedPartner
    1.48 +@released
    1.49 +*/
    1.50 +#define FAULT()		NKFault(__FILE__,__LINE__)
    1.51 +
    1.52 +#ifdef _DEBUG
    1.53 +
    1.54 +/**
    1.55 +@publishedPartner
    1.56 +@released
    1.57 +*/
    1.58 +#define __NK_ASSERT_DEBUG(c)	((void) ((c)||(FAULT(),0)) )
    1.59 +
    1.60 +#else
    1.61 +
    1.62 +#define __NK_ASSERT_DEBUG(c)
    1.63 +
    1.64 +#endif
    1.65 +
    1.66 +/**
    1.67 +@publishedPartner
    1.68 +@released
    1.69 +*/
    1.70 +#define __NK_ASSERT_ALWAYS(c)	((void) ((c)||(FAULT(),0)) )
    1.71 +
    1.72 +/**
    1.73 +	@publishedPartner
    1.74 +	@released
    1.75 +*/
    1.76 +const TInt KNumPriorities=64;
    1.77 +
    1.78 +const TInt KMaxCpus=8;
    1.79 +
    1.80 +class NSchedulable;
    1.81 +class NThread;
    1.82 +class NThreadGroup;
    1.83 +
    1.84 +
    1.85 +/** Spin lock
    1.86 +
    1.87 +	Used for protecting a code fragment against both interrupts and concurrent
    1.88 +	execution on another processor.
    1.89 +
    1.90 +	List of spin locks in the nanokernel, in deadlock-prevention order:
    1.91 +	A	NEventHandler::TiedLock (preemption)
    1.92 +	B	NFastMutex spin locks (preemption)
    1.93 +	C	Thread spin locks (preemption)
    1.94 +	D	Thread group spin locks (preemption)
    1.95 +	E	Per-CPU ready list lock (preemption)
    1.96 +
    1.97 +	a	Idle DFC list lock (interrupts)
    1.98 +	b	Per-CPU exogenous IDFC queue lock (interrupts)
    1.99 +	c	NTimerQ spin lock (interrupts)
   1.100 +	d	Generic IPI list locks (interrupts)
   1.101 +	e	NIrq spin locks (interrupts)
   1.102 +	f	Per-CPU event handler list lock (interrupts)
   1.103 +	z	BTrace lock (interrupts)
   1.104 +
    1.105 +	z must be the minimum since BTrace can appear anywhere
   1.106 +
   1.107 +	interrupt-disabling spinlocks must be lower than preemption-disabling ones
   1.108 +
   1.109 +	Nestings which actually occur are:
   1.110 +		A > C
   1.111 +		B > C > D > E
   1.112 +		c > f
   1.113 +		Nothing (except possibly z) nested inside a, b, d, f
   1.114 +		e is held while calling HW-poking functions (which might use other spinlocks)
   1.115 +
   1.116 +@publishedPartner
   1.117 +@prototype
   1.118 +*/
   1.119 +class TSpinLock
   1.120 +	{
   1.121 +public:
   1.122 +	enum TOrder
   1.123 +		{
   1.124 +		// Bit 7 of order clear for locks used with interrupts disabled
   1.125 +		EOrderGenericIrqLow0	=0x00u,		// Device driver spin locks, low range
   1.126 +		EOrderGenericIrqLow1	=0x01u,		// Device driver spin locks, low range
   1.127 +		EOrderGenericIrqLow2	=0x02u,		// Device driver spin locks, low range
   1.128 +		EOrderGenericIrqLow3	=0x03u,		// Device driver spin locks, low range
   1.129 +		EOrderBTrace			=0x04u,		// BTrace lock
   1.130 +		EOrderEventHandlerList	=0x07u,		// Per-CPU event handler list lock
   1.131 +		EOrderCacheMaintenance  =0x08u,		// CacheMaintenance (for PL310)
   1.132 +		EOrderNIrq				=0x0Au,		// NIrq lock
   1.133 +		EOrderGenericIPIList	=0x0Du,		// Generic IPI list lock
   1.134 +		EOrderNTimerQ			=0x10u,		// Nanokernel timer queue lock
   1.135 +		EOrderExIDfcQ			=0x13u,		// Per-CPU exogenous IDFC queue list lock
   1.136 +		EOrderIdleDFCList		=0x16u,		// Idle DFC list lock
   1.137 +		EOrderGenericIrqHigh0	=0x18u,		// Device driver spin locks, high range
   1.138 +		EOrderGenericIrqHigh1	=0x19u,		// Device driver spin locks, high range
   1.139 +		EOrderGenericIrqHigh2	=0x1Au,		// Device driver spin locks, high range
   1.140 +		EOrderGenericIrqHigh3	=0x1Bu,		// Device driver spin locks, high range
   1.141 +
   1.142 +		// Bit 7 of order set for locks used with interrupts enabled, preemption disabled
   1.143 +		EOrderGenericPreLow0	=0x80u,		// Device driver spin locks, low range
   1.144 +		EOrderGenericPreLow1	=0x81u,		// Device driver spin locks, low range
   1.145 +		EOrderReadyList			=0x88u,		// Per-CPU ready list lock
   1.146 +		EOrderThreadGroup		=0x90u,		// Thread group locks
   1.147 +		EOrderThread			=0x91u,		// Thread locks
   1.148 +		EOrderFastMutex			=0x98u,		// Fast mutex locks
   1.149 +		EOrderEventHandlerTied	=0x9Cu,		// Event handler tied lock
   1.150 +		EOrderGenericPreHigh0	=0x9Eu,		// Device driver spin locks, high range
   1.151 +		EOrderGenericPreHigh1	=0x9Fu,		// Device driver spin locks, high range
   1.152 +
   1.153 +		EOrderNone				=0xFFu		// No order check required (e.g. for dynamic ordering)
   1.154 +		};
   1.155 +public:
   1.156 +	IMPORT_C TSpinLock(TUint aOrder);
   1.157 +	IMPORT_C void LockIrq();				/**< @internalComponent disable interrupts and acquire the lock */
   1.158 +	IMPORT_C void UnlockIrq();				/**< @internalComponent release the lock and enable interrupts */
   1.159 +	IMPORT_C TBool FlashIrq();				/**< @internalComponent if someone else is waiting for the lock, UnlockIrq() then LockIrq() */
   1.160 +	IMPORT_C void LockOnly();				/**< @internalComponent acquire the lock, assuming interrupts/preemption already disabled */
   1.161 +	IMPORT_C void UnlockOnly();				/**< @internalComponent release the lock, don't change interrupt/preemption state */
   1.162 +	IMPORT_C TBool FlashOnly();				/**< @internalComponent if someone else is waiting for the lock, UnlockOnly() then LockOnly() */
   1.163 +	IMPORT_C TInt LockIrqSave();			/**< @internalComponent remember original interrupt state then disable interrupts and acquire the lock */
   1.164 +	IMPORT_C void UnlockIrqRestore(TInt);	/**< @internalComponent release the lock then restore original interrupt state */
   1.165 +	IMPORT_C TBool FlashIrqRestore(TInt);	/**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestore() then LockIrq() */
   1.166 +	IMPORT_C TBool FlashPreempt();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnly(); NKern::PreemptionPoint(); LockOnly(); */
   1.167 +private:
   1.168 +	volatile TUint64 iLock;
   1.169 +	};
   1.170 +
   1.171 +
   1.172 +/** Macro to disable interrupts and acquire the lock.
   1.173 +
   1.174 +@publishedPartner
   1.175 +@prototype
   1.176 +*/
   1.177 +#define __SPIN_LOCK_IRQ(lock)				((lock).LockIrq())
   1.178 +
   1.179 +/** Macro to release the lock and enable interrupts.
   1.180 +
   1.181 +@publishedPartner
   1.182 +@prototype
   1.183 +*/
   1.184 +#define __SPIN_UNLOCK_IRQ(lock)				(lock).UnlockIrq()
   1.185 +
   1.186 +/** Macro to see if someone else is waiting for the lock, enabling IRQs 
   1.187 +    then disabling IRQs again.
   1.188 +
   1.189 +@publishedPartner
   1.190 +@prototype
   1.191 +*/
   1.192 +#define __SPIN_FLASH_IRQ(lock)				(lock).FlashIrq()
   1.193 +
   1.194 +/** Macro to remember original interrupt state then disable interrupts 
   1.195 +    and acquire the lock.
   1.196 +    
   1.197 +@publishedPartner
   1.198 +@prototype
   1.199 +*/
   1.200 +#define __SPIN_LOCK_IRQSAVE(lock)			((lock).LockIrqSave())
   1.201 +
   1.202 +/** Macro to release the lock then restore original interrupt state to that 
   1.203 +	supplied.
   1.204 +	
   1.205 +@publishedPartner
   1.206 +@prototype
   1.207 +*/
   1.208 +#define __SPIN_UNLOCK_IRQRESTORE(lock,irq)	(lock).UnlockIrqRestore(irq)
   1.209 +
   1.210 +/** Macro to see if someone else is waiting for the lock, enabling IRQs to
   1.211 +	the original state supplied then disabling IRQs again.
   1.212 +    
   1.213 +@publishedPartner
   1.214 +@prototype
   1.215 +*/
   1.216 +#define __SPIN_FLASH_IRQRESTORE(lock,irq)	(lock).FlashIrqRestore(irq)
   1.217 +
   1.218 +/** Macro to acquire the lock. This assumes the caller has already disabled 
   1.219 +    interrupts/preemption. 
   1.220 +	
     1.221 +	If interrupts/preemption is not disabled, a run-time assert will occur.
     1.222 +	This is to protect against unsafe code that might lead to same-core 
     1.223 +	deadlock.
   1.224 +	
   1.225 +    In device driver code it is safer to use __SPIN_LOCK_IRQSAVE() instead, 
     1.226 +	although not as efficient should interrupts already be disabled for the 
   1.227 +	duration the lock is held.
   1.228 +    
   1.229 +@publishedPartner
   1.230 +@prototype
   1.231 +*/
   1.232 +#define __SPIN_LOCK(lock)					((lock).LockOnly())
   1.233 +
   1.234 +/** Macro to release the lock, don't change interrupt/preemption state.
   1.235 +
   1.236 +@publishedPartner
   1.237 +@prototype
   1.238 +*/
   1.239 +#define __SPIN_UNLOCK(lock)					(lock).UnlockOnly()
   1.240 +
   1.241 +/**
   1.242 +@internalComponent
   1.243 +*/
   1.244 +#define __SPIN_FLASH(lock)					(lock).FlashOnly()
   1.245 +
   1.246 +/** Macro to see if someone else is waiting for the lock, enabling preemption 
   1.247 +    then disabling it again.
   1.248 +
   1.249 +@publishedPartner
   1.250 +@prototype
   1.251 +*/
   1.252 +#define __SPIN_FLASH_PREEMPT(lock)			(lock).FlashPreempt()
   1.253 +
   1.254 +
   1.255 +/** Read/Write Spin lock
   1.256 +
   1.257 +@publishedPartner
   1.258 +@prototype
   1.259 +*/
   1.260 +class TRWSpinLock
   1.261 +	{
   1.262 +public:
   1.263 +	IMPORT_C TRWSpinLock(TUint aOrder);		// Uses same order space as TSpinLock
   1.264 +
   1.265 +	IMPORT_C void LockIrqR();				/**< @internalComponent disable interrupts and acquire read lock */
   1.266 +	IMPORT_C void UnlockIrqR();				/**< @internalComponent release read lock and enable interrupts */
   1.267 +	IMPORT_C TBool FlashIrqR();				/**< @internalComponent if someone else is waiting for write lock, UnlockIrqR() then LockIrqR() */
   1.268 +	IMPORT_C void LockIrqW();				/**< @internalComponent disable interrupts and acquire write lock */
   1.269 +	IMPORT_C void UnlockIrqW();				/**< @internalComponent release write lock and enable interrupts */
   1.270 +	IMPORT_C TBool FlashIrqW();				/**< @internalComponent if someone else is waiting for the lock, UnlockIrqW() then LockIrqW() */
   1.271 +	IMPORT_C void LockOnlyR();				/**< @internalComponent acquire read lock, assuming interrupts/preemption already disabled */
   1.272 +	IMPORT_C void UnlockOnlyR();			/**< @internalComponent release read lock, don't change interrupt/preemption state */
   1.273 +	IMPORT_C TBool FlashOnlyR();			/**< @internalComponent if someone else is waiting for write lock, UnlockOnlyR() then LockOnlyR() */
   1.274 +	IMPORT_C void LockOnlyW();				/**< @internalComponent acquire write lock, assuming interrupts/preemption already disabled */
   1.275 +	IMPORT_C void UnlockOnlyW();			/**< @internalComponent release write lock, don't change interrupt/preemption state */
   1.276 +	IMPORT_C TBool FlashOnlyW();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW() then LockOnlyW() */
   1.277 +	IMPORT_C TInt LockIrqSaveR();			/**< @internalComponent disable interrupts and acquire read lock, return original interrupt state */
   1.278 +	IMPORT_C void UnlockIrqRestoreR(TInt);	/**< @internalComponent release read lock and reset original interrupt state */
   1.279 +	IMPORT_C TBool FlashIrqRestoreR(TInt);	/**< @internalComponent if someone else is waiting for write lock, UnlockIrqRestoreR() then LockIrqR() */
   1.280 +	IMPORT_C TInt LockIrqSaveW();			/**< @internalComponent disable interrupts and acquire write lock, return original interrupt state */
   1.281 +	IMPORT_C void UnlockIrqRestoreW(TInt);	/**< @internalComponent release write lock and reset original interrupt state */
   1.282 +	IMPORT_C TBool FlashIrqRestoreW(TInt);	/**< @internalComponent if someone else is waiting for the lock, UnlockIrqRestoreW() then LockIrqW() */
   1.283 +	IMPORT_C TBool FlashPreemptR();			/**< @internalComponent if someone else is waiting for write lock, UnlockOnlyR(); NKern::PreemptionPoint(); LockOnlyR(); */
   1.284 +	IMPORT_C TBool FlashPreemptW();			/**< @internalComponent if someone else is waiting for the lock, UnlockOnlyW(); NKern::PreemptionPoint(); LockOnlyW(); */
   1.285 +private:
   1.286 +	volatile TUint64 iLock;
   1.287 +	};
   1.288 +
   1.289 +
   1.290 +/**
   1.291 +@publishedPartner
   1.292 +@prototype
   1.293 +*/
   1.294 +#define __SPIN_LOCK_IRQ_R(lock)					(lock).LockIrqR()
   1.295 +
   1.296 +/**
   1.297 +@publishedPartner
   1.298 +@prototype
   1.299 +*/
   1.300 +#define __SPIN_UNLOCK_IRQ_R(lock)				(lock).UnlockIrqR()
   1.301 +
   1.302 +/**
   1.303 +@publishedPartner
   1.304 +@prototype
   1.305 +*/
   1.306 +#define __SPIN_FLASH_IRQ_R(lock)				((lock).FlashIrqR())
   1.307 +
   1.308 +/**
   1.309 +@publishedPartner
   1.310 +@prototype
   1.311 +*/
   1.312 +#define __SPIN_LOCK_IRQ_W(lock)					(lock).LockIrqW()
   1.313 +
   1.314 +/**
   1.315 +@publishedPartner
   1.316 +@prototype
   1.317 +*/
   1.318 +#define __SPIN_UNLOCK_IRQ_W(lock)				(lock).UnlockIrqW()
   1.319 +
   1.320 +/**
   1.321 +@publishedPartner
   1.322 +@prototype
   1.323 +*/
   1.324 +#define __SPIN_FLASH_IRQ_W(lock)				((lock).FlashIrqW())
   1.325 +
   1.326 +
   1.327 +/**
   1.328 +@publishedPartner
   1.329 +@prototype
   1.330 +*/
   1.331 +#define __SPIN_LOCK_R(lock)						(lock).LockOnlyR()
   1.332 +
   1.333 +/**
   1.334 +@publishedPartner
   1.335 +@prototype
   1.336 +*/
   1.337 +#define __SPIN_UNLOCK_R(lock)					(lock).UnlockOnlyR()
   1.338 +
   1.339 +/**
   1.340 +@internalComponent
   1.341 +*/
   1.342 +#define __SPIN_FLASH_R(lock)					((lock).FlashOnlyR())
   1.343 +
   1.344 +/**
   1.345 +@publishedPartner
   1.346 +@prototype
   1.347 +*/
   1.348 +#define __SPIN_LOCK_W(lock)						(lock).LockOnlyW()
   1.349 +
   1.350 +/**
   1.351 +@publishedPartner
   1.352 +@prototype
   1.353 +*/
   1.354 +#define __SPIN_UNLOCK_W(lock)					(lock).UnlockOnlyW()
   1.355 +
   1.356 +/**
   1.357 +@internalComponent
   1.358 +*/
   1.359 +#define __SPIN_FLASH_W(lock)					((lock).FlashOnlyW())
   1.360 +
   1.361 +
   1.362 +/**
   1.363 +@publishedPartner
   1.364 +@prototype
   1.365 +*/
   1.366 +#define __SPIN_LOCK_IRQSAVE_R(lock)				(lock).LockIrqSaveR()
   1.367 +
   1.368 +/**
   1.369 +@publishedPartner
   1.370 +@prototype
   1.371 +*/
   1.372 +#define __SPIN_UNLOCK_IRQRESTORE_R(lock,irq)	(lock).UnlockIrqRestoreR(irq)
   1.373 +
   1.374 +/**
   1.375 +@publishedPartner
   1.376 +@prototype
   1.377 +*/
   1.378 +#define __SPIN_FLASH_IRQRESTORE_R(lock,irq)		((lock).FlashIrqRestoreR(irq))
   1.379 +
   1.380 +/**
   1.381 +@publishedPartner
   1.382 +@prototype
   1.383 +*/
   1.384 +#define __SPIN_LOCK_IRQSAVE_W(lock)				(lock).LockIrqSaveW()
   1.385 +
   1.386 +/**
   1.387 +@publishedPartner
   1.388 +@prototype
   1.389 +*/
   1.390 +#define __SPIN_UNLOCK_IRQRESTORE_W(lock,irq)	(lock).UnlockIrqRestoreW(irq)
   1.391 +
   1.392 +/**
   1.393 +@publishedPartner
   1.394 +@prototype
   1.395 +*/
   1.396 +#define __SPIN_FLASH_IRQRESTORE_W(lock,irq)		((lock).FlashIrqRestoreW(irq))
   1.397 +
   1.398 +
   1.399 +/**
   1.400 +@publishedPartner
   1.401 +@prototype
   1.402 +*/
   1.403 +#define __SPIN_FLASH_PREEMPT_R(lock)			((lock).FlashPreemptR())
   1.404 +
   1.405 +/**
   1.406 +@publishedPartner
   1.407 +@prototype
   1.408 +*/
   1.409 +#define __SPIN_FLASH_PREEMPT_W(lock)			((lock).FlashPreemptW())
   1.410 +
   1.411 +
   1.412 +#ifdef _DEBUG
   1.413 +#define __INCLUDE_SPIN_LOCK_CHECKS__
   1.414 +#endif
   1.415 +
   1.416 +
   1.417 +/** Nanokernel fast semaphore
   1.418 +
   1.419 +	A light-weight semaphore class that only supports a single waiting thread,
   1.420 +	suitable for the Symbian OS thread I/O semaphore.
   1.421 +	
   1.422 +	Initialising a NFastSemaphore involves two steps:
   1.423 +	
   1.424 +	- Constructing the semaphore
   1.425 +	- Setting the semaphore owning thread (the one allowed to wait on it)
   1.426 +	
   1.427 +	For example, creating one for the current thread to wait on:
   1.428 +	
   1.429 +	@code
   1.430 +	NFastSemaphore sem;
   1.431 +	sem.iOwningThread = NKern::CurrentThread();
   1.432 +	@endcode
   1.433 +	
   1.434 +	@publishedPartner
   1.435 +	@prototype
   1.436 +*/
   1.437 +class NFastSemaphore
   1.438 +	{
   1.439 +public:
   1.440 +	inline NFastSemaphore();
   1.441 +	inline NFastSemaphore(NThreadBase* aThread);
   1.442 +	IMPORT_C void SetOwner(NThreadBase* aThread);
   1.443 +	IMPORT_C void Wait();
   1.444 +	IMPORT_C void Signal();
   1.445 +	IMPORT_C void SignalN(TInt aCount);
   1.446 +	IMPORT_C void Reset();
   1.447 +	void WaitCancel();
   1.448 +
   1.449 +	TInt Dec(NThreadBase* aThread);	// does mb() if >0
   1.450 +	NThreadBase* Inc(TInt aCount);	// does mb()
   1.451 +	NThreadBase* DoReset();	// does mb()
   1.452 +public:
   1.453 +	/** If >=0 the semaphore count
   1.454 +		If <0, (thread>>2)|0x80000000
   1.455 +		@internalComponent
   1.456 +	*/
   1.457 +	TInt iCount;
   1.458 +
   1.459 +	/** The thread allowed to wait on the semaphore
   1.460 +		@internalComponent
   1.461 +	*/
   1.462 +	NThreadBase* iOwningThread;	
   1.463 +	};
   1.464 +
   1.465 +/** Create a fast semaphore
   1.466 +
   1.467 +	@publishedPartner
   1.468 +	@prototype
   1.469 +*/
   1.470 +inline NFastSemaphore::NFastSemaphore()
   1.471 +	: iCount(0), iOwningThread(NULL)
   1.472 +	{}
   1.473 +
   1.474 +/** Nanokernel fast mutex
   1.475 +
   1.476 +	A light-weight priority-inheritance mutex that can be used if the following
   1.477 +	conditions apply:
   1.478 +	
   1.479 +	- Threads that hold the mutex never block.
   1.480 +	- The mutex is never acquired in a nested fashion
   1.481 +	
   1.482 +	If either of these conditions is not met, a DMutex object is more appropriate.
   1.483 +	
   1.484 +	@publishedPartner
   1.485 +	@prototype
   1.486 +*/
   1.487 +class NFastMutex
   1.488 +	{
   1.489 +public:
   1.490 +	IMPORT_C NFastMutex();
   1.491 +	IMPORT_C void Wait();
   1.492 +	IMPORT_C void Signal();
   1.493 +	IMPORT_C TBool HeldByCurrentThread();
   1.494 +private:
   1.495 +	void DoWaitL();
   1.496 +	void DoSignalL();
   1.497 +
   1.498 +	friend class NKern;
   1.499 +public:
   1.500 +	/** @internalComponent
   1.501 +
   1.502 +	If mutex is free and no-one is waiting, iHoldingThread=0
   1.503 +	If mutex is held and no-one is waiting, iHoldingThread points to holding thread
   1.504 +	If mutex is free but threads are waiting, iHoldingThread=1
   1.505 +	If mutex is held and threads are waiting, iHoldingThread points to holding thread but with bit 0 set
   1.506 +	*/
   1.507 +	NThreadBase* iHoldingThread;
   1.508 +
   1.509 +	TUint32 i_NFastMutex_Pad1;	/**< @internalComponent */
   1.510 +
   1.511 +	/** @internalComponent
   1.512 +
   1.513 +	Spin lock to protect mutex
   1.514 +	*/
   1.515 +	TSpinLock iMutexLock;
   1.516 +
   1.517 +	/** @internalComponent
   1.518 +
   1.519 +	List of NThreads which are waiting for the mutex. The threads are linked via
   1.520 +	their iWaitLink members.
   1.521 +	*/
   1.522 +	TPriList<NThreadBase, KNumPriorities> iWaitQ;
   1.523 +	};
   1.524 +
   1.525 +__ASSERT_COMPILE(!(_FOFF(NFastMutex,iMutexLock)&7));
   1.526 +
   1.527 +
   1.528 +/**
   1.529 +@publishedPartner
   1.530 +@prototype
   1.531 +
   1.532 +The type of the callback function used by the nanokernel timer. 
   1.533 +
   1.534 +@see NTimer
   1.535 +*/
   1.536 +typedef NEventFn NTimerFn;
   1.537 +
   1.538 +
   1.539 +
   1.540 +
   1.541 +/**
   1.542 +@publishedPartner
   1.543 +@prototype
   1.544 +
   1.545 +A basic relative timer provided by the nanokernel.
   1.546 +
   1.547 +It can generate either a one-shot interrupt or periodic interrupts.
   1.548 +
   1.549 +A timeout handler is called when the timer expires, either:
   1.550 +- from the timer ISR - if the timer is queued via OneShot(TInt aTime) or OneShot(TInt aTime, TBool EFalse), or
   1.551 +- from the nanokernel timer dfc1 thread - if the timer is queued via OneShot(TInt aTime, TBool ETrue) call, or
   1.552 +- from any other dfc thread that provided DFC belongs to - if the timer is queued via OneShot(TInt aTime, TDfc& aDfc) call.
    1.553 +Call-back mechanism cannot be changed in the lifetime of a timer.
   1.554 +
   1.555 +These timer objects may be manipulated from any context.
   1.556 +The timers are driven from a periodic system tick interrupt,
   1.557 +usually a 1ms period.
   1.558 +
   1.559 +@see NTimerFn
   1.560 +*/
   1.561 +class NTimerQ;
   1.562 +class NTimer : public NEventHandler
   1.563 +	{
   1.564 +public:
   1.565 +	/**
   1.566 +	Default constructor.
   1.567 +	*/
   1.568 +	inline NTimer()
   1.569 +		{
   1.570 +		iHType = EEventHandlerNTimer;
   1.571 +		i8888.iHState1 = EIdle;
   1.572 +		}
   1.573 +	/**
   1.574 +	Constructor taking a callback function and a pointer to be passed
   1.575 +	to the callback function.
   1.576 +	
   1.577 +	@param aFunction The callback function.
   1.578 +	@param aPtr      A pointer to be passed to the callback function 
   1.579 +	                 when called.
   1.580 +	*/
   1.581 +	inline NTimer(NTimerFn aFunction, TAny* aPtr)
   1.582 +		{
   1.583 +		iPtr = aPtr;
   1.584 +		iFn = aFunction;
   1.585 +		iHType = EEventHandlerNTimer;
   1.586 +		i8888.iHState1 = EIdle;
   1.587 +		}
   1.588 +	IMPORT_C NTimer(NSchedulable* aTied, NTimerFn aFunction, TAny* aPtr);
   1.589 +	IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TInt aPriority);					// create DFC, queue to be set later
   1.590 +	IMPORT_C NTimer(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority);	// create DFC
   1.591 +	IMPORT_C void SetDfcQ(TDfcQue* aDfcQ);
   1.592 +	IMPORT_C ~NTimer();
   1.593 +	IMPORT_C TInt SetTied(NSchedulable* aTied);
   1.594 +	IMPORT_C TInt OneShot(TInt aTime);
   1.595 +	IMPORT_C TInt OneShot(TInt aTime, TBool aDfc);
   1.596 +	IMPORT_C TInt OneShot(TInt aTime, TDfc& aDfc);
   1.597 +	IMPORT_C TInt Again(TInt aTime);
   1.598 +	IMPORT_C TBool Cancel();
   1.599 +	IMPORT_C TBool IsPending();
   1.600 +private:
   1.601 +	enum { ECancelDestroy=1 };
   1.602 +private:
   1.603 +	inline TBool IsNormal()
   1.604 +		{ return iHType==EEventHandlerNTimer; }
   1.605 +	inline TBool IsMutating()
   1.606 +		{ return iHType<KNumDfcPriorities; }
   1.607 +	inline TBool IsValid()
   1.608 +		{ return iHType<KNumDfcPriorities || iHType==EEventHandlerNTimer; }
   1.609 +	void AddAsDFC();
   1.610 +	TUint DoCancel(TUint aFlags);
   1.611 +	void DoCancel0(TUint aState);
   1.612 +	TBool DoCancelMutating(TUint aFlags);
   1.613 +public:
   1.614 +/**
   1.615 +	@internalComponent
   1.616 +*/
   1.617 +	enum TState
   1.618 +		{
   1.619 +		EIdle=0,			// not queued
   1.620 +							// 1 skipped so as not to clash with DFC states
   1.621 +		ETransferring=2,	// being transferred from holding to ordered queue
   1.622 +		EHolding=3,			// on holding queue
   1.623 +		EOrdered=4,			// on ordered queue
   1.624 +		ECritical=5,		// on ordered queue and in use by queue walk routine
   1.625 +		EFinal=6,			// on final queue
   1.626 +		EEventQ=32,			// 32+n = on event queue of CPU n (for tied timers)
   1.627 +		};
   1.628 +public:
   1.629 +	TUint32 iTriggerTime;	/**< @internalComponent */
   1.630 +	TUint32	iNTimerSpare1;	/**< @internalComponent */
   1.631 +
   1.632 +	/** This field is available for use by the timer client provided that
   1.633 +		the timer isn't a mutating-into-DFC timer.
   1.634 +		@internalTechnology */
   1.635 +//	TUint8 iUserFlags;									// i8888.iHState0
   1.636 +//	TUint8 iState;			/**< @internalComponent */	// i8888.iHState1
   1.637 +//	TUint8 iCompleteInDfc;	/**< @internalComponent */	// i8888.iHState2
   1.638 +
   1.639 +
   1.640 +	friend class NTimerQ;
   1.641 +	friend class NSchedulable;
   1.642 +	};
   1.643 +
   1.644 +/**
   1.645 +@internalTechnology
   1.646 +*/
   1.647 +#define	i_NTimer_iUserFlags	i8888.iHState0
   1.648 +
   1.649 +/**
   1.650 +@internalComponent
   1.651 +*/
   1.652 +#define	i_NTimer_iState		i8888.iHState1
   1.653 +
   1.654 +/**
   1.655 +	@publishedPartner
   1.656 +	@released
   1.657 +*/
   1.658 +typedef void (*NThreadFunction)(TAny*);
   1.659 +
   1.660 +/**
   1.661 +	@publishedPartner
   1.662 +	@released
   1.663 +*/
   1.664 +typedef TDfc* (*NThreadExitHandler)(NThread*);
   1.665 +
   1.666 +/**
   1.667 +	@publishedPartner
   1.668 +	@prototype
   1.669 +*/
   1.670 +typedef void (*NThreadStateHandler)(NThread*,TInt,TInt);
   1.671 +
   1.672 +/**
   1.673 +	@publishedPartner
   1.674 +	@prototype
   1.675 +*/
   1.676 +typedef void (*NThreadExceptionHandler)(TAny*,NThread*);
   1.677 +
   1.678 +/**
   1.679 +	@publishedPartner
   1.680 +	@prototype
   1.681 +*/
   1.682 +typedef void (*NThreadTimeoutHandler)(NThread*,TInt);
   1.683 +
   1.684 +/**
   1.685 +	@publishedPartner
   1.686 +	@prototype
   1.687 +*/
   1.688 +struct SNThreadHandlers
   1.689 +	{
   1.690 +	NThreadExitHandler iExitHandler;
   1.691 +	NThreadStateHandler iStateHandler;
   1.692 +	NThreadExceptionHandler iExceptionHandler;
   1.693 +	NThreadTimeoutHandler iTimeoutHandler;
   1.694 +	};
   1.695 +
   1.696 +/** @internalComponent */
   1.697 +extern void NThread_Default_State_Handler(NThread*, TInt, TInt);
   1.698 +
   1.699 +/** @internalComponent */
   1.700 +extern void NThread_Default_Exception_Handler(TAny*, NThread*);
   1.701 +
   1.702 +/** @internalComponent */
   1.703 +#define NTHREAD_DEFAULT_EXIT_HANDLER		((NThreadExitHandler)0)
   1.704 +
   1.705 +/** @internalComponent */
   1.706 +#define	NTHREAD_DEFAULT_STATE_HANDLER		(&NThread_Default_State_Handler)
   1.707 +
   1.708 +/** @internalComponent */
   1.709 +#define	NTHREAD_DEFAULT_EXCEPTION_HANDLER	(&NThread_Default_Exception_Handler)
   1.710 +
   1.711 +/** @internalComponent */
   1.712 +#define	NTHREAD_DEFAULT_TIMEOUT_HANDLER		((NThreadTimeoutHandler)0)
   1.713 +
   1.714 +
   1.715 +/**
   1.716 +	@publishedPartner
   1.717 +	@prototype
   1.718 +*/
   1.719 +struct SFastExecTable
   1.720 +	{
   1.721 +	TInt iFastExecCount;			// includes implicit function#0
   1.722 +	TLinAddr iFunction[1];			// first entry is for call number 1
   1.723 +	};
   1.724 +
   1.725 +/**
   1.726 +	@publishedPartner
   1.727 +	@prototype
   1.728 +*/
   1.729 +const TUint32 KExecFlagClaim=0x80000000;		// claim system lock
   1.730 +
   1.731 +/**
   1.732 +	@publishedPartner
   1.733 +	@prototype
   1.734 +*/
   1.735 +const TUint32 KExecFlagRelease=0x40000000;		// release system lock
   1.736 +
   1.737 +/**
   1.738 +	@publishedPartner
   1.739 +	@prototype
   1.740 +*/
   1.741 +const TUint32 KExecFlagPreprocess=0x20000000;	// preprocess
   1.742 +
   1.743 +/**
   1.744 +	@publishedPartner
   1.745 +	@prototype
   1.746 +*/
   1.747 +const TUint32 KExecFlagExtraArgMask=0x1C000000;	// 3 bits indicating additional arguments
   1.748 +
   1.749 +/**
   1.750 +	@publishedPartner
   1.751 +	@prototype
   1.752 +*/
   1.753 +const TUint32 KExecFlagExtraArgs2=0x04000000;	// 2 additional arguments
   1.754 +
   1.755 +/**
   1.756 +	@publishedPartner
   1.757 +	@prototype
   1.758 +*/
   1.759 +const TUint32 KExecFlagExtraArgs3=0x08000000;	// 3 additional arguments
   1.760 +
   1.761 +/**
   1.762 +	@publishedPartner
   1.763 +	@prototype
   1.764 +*/
   1.765 +const TUint32 KExecFlagExtraArgs4=0x0C000000;	// 4 additional arguments
   1.766 +
   1.767 +/**
   1.768 +	@publishedPartner
   1.769 +	@prototype
   1.770 +*/
   1.771 +const TUint32 KExecFlagExtraArgs5=0x10000000;	// 5 additional arguments
   1.772 +
   1.773 +/**
   1.774 +	@publishedPartner
   1.775 +	@prototype
   1.776 +*/
   1.777 +const TUint32 KExecFlagExtraArgs6=0x14000000;	// 6 additional arguments
   1.778 +
   1.779 +/**
   1.780 +	@publishedPartner
   1.781 +	@prototype
   1.782 +*/
   1.783 +const TUint32 KExecFlagExtraArgs7=0x18000000;	// 7 additional arguments
   1.784 +
   1.785 +/**
   1.786 +	@publishedPartner
   1.787 +	@prototype
   1.788 +*/
   1.789 +const TUint32 KExecFlagExtraArgs8=0x1C000000;	// 8 additional arguments
   1.790 +
   1.791 +
   1.792 +/**
   1.793 +	@publishedPartner
   1.794 +	@prototype
   1.795 +*/
   1.796 +struct SSlowExecEntry
   1.797 +	{
   1.798 +	TUint32 iFlags;					// information about call
   1.799 +	TLinAddr iFunction;				// address of function to be called
   1.800 +	};
   1.801 +
   1.802 +
   1.803 +/**
   1.804 +	@publishedPartner
   1.805 +	@prototype
   1.806 +*/
   1.807 +struct SSlowExecTable
   1.808 +	{
   1.809 +	TInt iSlowExecCount;
   1.810 +	TLinAddr iInvalidExecHandler;	// used if call number invalid
   1.811 +	TLinAddr iPreprocessHandler;	// used for handle lookups
   1.812 +	SSlowExecEntry iEntries[1];		// first entry is for call number 0
   1.813 +	};
   1.814 +
   1.815 +// Thread iAttributes Constants
   1.816 +const TUint8 KThreadAttImplicitSystemLock=1;	/**< @internalComponent */
   1.817 +const TUint8 KThreadAttAddressSpace=2;			/**< @internalComponent */
   1.818 +const TUint8 KThreadAttLoggable=4;				/**< @internalComponent */
   1.819 +
   1.820 +
   1.821 +// Thread CPU
   1.822 +const TUint32 KCpuAffinityAny=0xffffffffu;		/**< @internalComponent */
   1.823 +
   1.824 +/** Information needed for creating a nanothread.
   1.825 +
   1.826 +	@publishedPartner
   1.827 +	@prototype
   1.828 +*/
   1.829 +struct SNThreadCreateInfo
   1.830 +	{
   1.831 +	NThreadFunction iFunction;
   1.832 +	TAny* iStackBase;
   1.833 +	TInt iStackSize;
   1.834 +	TInt iPriority;
   1.835 +	TInt iTimeslice;
   1.836 +	TUint8 iAttributes;
   1.837 +	TUint32 iCpuAffinity;
   1.838 +	const SNThreadHandlers* iHandlers;
   1.839 +	const SFastExecTable* iFastExecTable;
   1.840 +	const SSlowExecTable* iSlowExecTable;
   1.841 +	const TUint32* iParameterBlock;
   1.842 +	TInt iParameterBlockSize;		// if zero, iParameterBlock _is_ the initial data
   1.843 +									// otherwise it points to n bytes of initial data
   1.844 +	NThreadGroup* iGroup;			// NULL for lone thread
   1.845 +	};
   1.846 +
   1.847 +/** Information needed for creating a nanothread group.
   1.848 +
   1.849 +	@publishedPartner
   1.850 +	@prototype
   1.851 +*/
   1.852 +struct SNThreadGroupCreateInfo
   1.853 +	{
   1.854 +	TUint32 iCpuAffinity;
   1.855 +	};
   1.856 +
   1.857 +/**	Constant for use with NKern:: functions which release a fast mutex as well
   1.858 +	as performing some other operations.
   1.859 +
   1.860 +	@publishedPartner
   1.861 +	@released
   1.862 +*/
   1.863 +#define	SYSTEM_LOCK		(NFastMutex*)0
   1.864 +
   1.865 +
/** Idle handler function
	Pointer to a function which is called whenever a CPU goes idle

	@param	aPtr	The iPtr stored in the SCpuIdleHandler structure
	@param	aStage	If positive, the number of processors still active
					If zero, indicates all processors are now idle
					-1 indicates that postamble processing is required after waking up

	@publishedPartner
	@prototype
*/
typedef void (*TCpuIdleHandlerFn)(TAny* aPtr, TInt aStage);

/** Idle handler structure

	@publishedPartner
	@prototype
*/
struct SCpuIdleHandler
	{
	TCpuIdleHandlerFn	iHandler;			// function invoked when a CPU goes idle
	TAny*				iPtr;				// opaque value passed to iHandler as aPtr
	volatile TBool		iPostambleRequired;	// set when post-wakeup processing is needed (cf. aStage == -1)
	};
   1.890 +
   1.891 +
/**
Reason code passed to a TUserModeCallbackFunc when it is dequeued.

@internalComponent
*/
enum TUserModeCallbackReason
	{
	EUserModeCallbackRun,		// the callback is being run (thread returning to user mode)
	EUserModeCallbackCancel,	// the callback is being cancelled without running
	};
   1.900 +
   1.901 +
/**
A callback function executed when a thread returns to user mode.

@internalComponent
*/
typedef void (*TUserModeCallbackFunc)(TAny* aThisPtr, TUserModeCallbackReason aReasonCode);


/**
An object representing a queued callback to be executed when a thread returns to user mode.

Queued via NKern::QueueUserModeCallback() and run or cancelled with one of the
TUserModeCallbackReason codes.

@internalComponent
*/
class TUserModeCallback
	{
public:
	TUserModeCallback(TUserModeCallbackFunc);
	~TUserModeCallback();

public:
	TUserModeCallback* volatile iNext;	// next callback in the owning thread's queue (singly linked)
	TUserModeCallbackFunc iFunc;		// function invoked when the callback runs or is cancelled
	};

// Sentinel value of iNext marking a callback which is not currently queued on any thread.
TUserModeCallback* const KUserModeCallbackUnqueued = ((TUserModeCallback*)1);
   1.927 +
   1.928 +
/** Main function for AP
	(AP = application processor, i.e. a secondary CPU booted by NKern::BootAP())

@internalTechnology
*/
struct SAPBootInfo;
typedef void (*TAPBootFunc)(volatile SAPBootInfo*);
   1.935 +
   1.936 +
/** Information needed to boot an AP

@internalTechnology
*/
struct SAPBootInfo
	{
	TUint32				iCpu;				// Hardware CPU ID
	TUint32				iInitStackSize;		// Size of initial stack
	TLinAddr			iInitStackBase;		// Base of initial stack
	TAPBootFunc			iMain;				// Address of initial function to call
	TAny*				iArgs[4];			// opaque arguments for iMain -- usage is implementation-defined; confirm
	};

// Interrupt service routine; the TAny* argument is presumably the aPtr supplied
// to NKern::InterruptBind() -- TODO confirm against the interrupt dispatcher.
typedef void (*NIsr)(TAny*);
   1.951 +
/** Nanokernel functions

	@publishedPartner
	@prototype
*/
class NKern
	{
public:
	/** Bitmask values used when blocking a nanothread.
		@see NKern::Block()
	 */
	enum TBlockMode
		{
		EEnterCS=1,		/**< Enter thread critical section before blocking */
		ERelease=2,		/**< Release specified fast mutex before blocking */
		EClaim=4,		/**< Re-acquire specified fast mutex when unblocked */
		EObstruct=8,	/**< Signifies obstruction of thread rather than lack of work to do */
		};

	/** Values that specify the context of the processor.
		@see NKern::CurrentContext()
	*/
	enum TContext
		{
		EThread=0,			/**< The processor is in a thread context*/
		EIDFC=1,			/**< The processor is in an IDFC context*/
		EInterrupt=2,		/**< The processor is in an interrupt context*/
		EEscaped=KMaxTInt	/**< Not a valid process context on target hardware*/
		};

public:
	// Threads
	IMPORT_C static TInt ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo);
	IMPORT_C static TBool ThreadSuspend(NThread* aThread, TInt aCount);
	IMPORT_C static TBool ThreadResume(NThread* aThread);
	IMPORT_C static TBool ThreadResume(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static TBool ThreadForceResume(NThread* aThread);
	IMPORT_C static TBool ThreadForceResume(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue);
	IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex);
	IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority);
	IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex);
	IMPORT_C static void ThreadRequestSignal(NThread* aThread);
	IMPORT_C static void ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static void ThreadRequestSignal(NThread* aThread, TInt aCount);
	IMPORT_C static void ThreadKill(NThread* aThread);
	IMPORT_C static void ThreadKill(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static void ThreadEnterCS();
	IMPORT_C static void ThreadLeaveCS();
	static NThread* _ThreadEnterCS();		/**< @internalComponent */
	static void _ThreadLeaveCS();			/**< @internalComponent */
	// Overloads taking an NFastMutex* release that mutex as part of the same
	// operation (SYSTEM_LOCK may be used to denote the system lock).
	IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex);
	IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode);
	IMPORT_C static void NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj);
	IMPORT_C static void ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
	IMPORT_C static void ThreadSetUserContext(NThread* aThread, TAny* aContext);
	IMPORT_C static void ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
	static void ThreadModifyUsp(NThread* aThread, TLinAddr aUsp);
	IMPORT_C static TInt FreezeCpu();													/**< @internalComponent */
	IMPORT_C static void EndFreezeCpu(TInt aCookie);									/**< @internalComponent */
	IMPORT_C static TUint32 ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity);	/**< @internalComponent */
	IMPORT_C static void ThreadSetTimeslice(NThread* aThread, TInt aTimeslice);			/**< @internalComponent */
	IMPORT_C static TUint64 ThreadCpuTime(NThread* aThread);							/**< @internalComponent */
	IMPORT_C static TUint32 CpuTimeMeasFreq();											/**< @internalComponent */
	static TInt QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback);	/**< @internalComponent */
	static void MoveUserModeCallbacks(NThreadBase* aSrcThread, NThreadBase* aDestThread);	/**< @internalComponent */
	static void CancelUserModeCallbacks();												/**< @internalComponent */

	// Thread Groups
	IMPORT_C static TInt GroupCreate(NThreadGroup* aGroup, SNThreadGroupCreateInfo& aInfo);
	IMPORT_C static void GroupDestroy(NThreadGroup* aGroup);
	IMPORT_C static NThreadGroup* CurrentGroup();
	IMPORT_C static NThreadGroup* LeaveGroup();
	IMPORT_C static void JoinGroup(NThreadGroup* aGroup);
	IMPORT_C static TUint32 GroupSetCpuAffinity(NThreadGroup* aGroup, TUint32 aAffinity);

	// Fast semaphores
	IMPORT_C static void FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread);
	IMPORT_C static void FSWait(NFastSemaphore* aSem);
	IMPORT_C static void FSSignal(NFastSemaphore* aSem);
	IMPORT_C static void FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex);
	IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount);
	IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex);

	// Fast mutexes
	IMPORT_C static void FMWait(NFastMutex* aMutex);
	IMPORT_C static void FMSignal(NFastMutex* aMutex);
	IMPORT_C static TBool FMFlash(NFastMutex* aMutex);

	// Scheduler
	IMPORT_C static void Lock();
	IMPORT_C static NThread* LockC();
	IMPORT_C static void Unlock();
	IMPORT_C static TInt PreemptionPoint();

	// Interrupts
	IMPORT_C static TInt DisableAllInterrupts();
	IMPORT_C static TInt DisableInterrupts(TInt aLevel);
	IMPORT_C static void RestoreInterrupts(TInt aRestoreData);
	IMPORT_C static void EnableAllInterrupts();

	// Read-modify-write
	// Thin wrappers over the __e32_atomic_* primitives declared in e32atomics.h.
	// All use the "_ord" (fully ordered / full barrier) variants.
	// NOTE(review): the add/swp/tas/axo families are understood to return the
	// ORIGINAL value of the operand -- confirm against e32atomics.h.
	inline static TInt LockedInc(TInt& aCount)
		{ return __e32_atomic_add_ord32(&aCount,1); }
	inline static TInt LockedDec(TInt& aCount)
		{ return __e32_atomic_add_ord32(&aCount,0xffffffff); }	// adds -1 in two's complement
	inline static TInt LockedAdd(TInt& aDest, TInt aSrc)
		{ return __e32_atomic_add_ord32(&aDest,aSrc); }
	inline static TInt64 LockedInc(TInt64& aCount)
		{ return __e32_atomic_add_ord64(&aCount,1); }
	inline static TInt64 LockedDec(TInt64& aCount)
		{ return __e32_atomic_add_ord64(&aCount,TUint64(TInt64(-1))); }
	inline static TInt64 LockedAdd(TInt64& aDest, TInt64 aSrc)		/**< @internalComponent */
		{ return __e32_atomic_add_ord64(&aDest,aSrc); }
	// LockedSetClear*: atomically clear the aClearMask bits and set the aSetMask
	// bits of aDest in one operation (and-with-~(clear|set), then xor-with-set).
	inline static TUint32 LockedSetClear(TUint32& aDest, TUint32 aClearMask, TUint32 aSetMask)
		{ return __e32_atomic_axo_ord32(&aDest,~(aClearMask|aSetMask),aSetMask); }
	inline static TUint16 LockedSetClear16(TUint16& aDest, TUint16 aClearMask, TUint16 aSetMask)	/**< @internalComponent */
		{ return __e32_atomic_axo_ord16(&aDest,TUint16(~(aClearMask|aSetMask)),aSetMask); }
	inline static TUint8 LockedSetClear8(TUint8& aDest, TUint8 aClearMask, TUint8 aSetMask)
		{ return __e32_atomic_axo_ord8(&aDest,TUint8(~(aClearMask|aSetMask)),aSetMask); }
	// SafeInc/SafeDec: conditional add via threshold-add-subtract -- modify the
	// count only when it is >= 1, i.e. zero ("dead") counts stay at zero.
	inline static TInt SafeInc(TInt& aCount)
		{ return __e32_atomic_tas_ord32(&aCount,1,1,0); }
	inline static TInt SafeDec(TInt& aCount)
		{ return __e32_atomic_tas_ord32(&aCount,1,-1,0); }
	inline static TInt AddIfGe(TInt& aCount, TInt aLimit, TInt aInc)	/**< @internalComponent */
		{ return __e32_atomic_tas_ord32(&aCount,aLimit,aInc,0); }		// add aInc only when aCount >= aLimit
	inline static TInt AddIfLt(TInt& aCount, TInt aLimit, TInt aInc)	/**< @internalComponent */
		{ return __e32_atomic_tas_ord32(&aCount,aLimit,0,aInc); }		// add aInc only when aCount < aLimit
	// SafeSwap*: unconditional atomic exchange.
	inline static TAny* SafeSwap(TAny* aNewValue, TAny*& aPtr)
		{ return __e32_atomic_swp_ord_ptr(&aPtr, aNewValue); }
	inline static TUint8 SafeSwap8(TUint8 aNewValue, TUint8& aPtr)
		{ return __e32_atomic_swp_ord8(&aPtr, aNewValue); }
	inline static TUint16 SafeSwap16(TUint16 aNewValue, TUint16& aPtr)						/**< @internalComponent */
		{ return __e32_atomic_swp_ord16(&aPtr, aNewValue); }
	// CompareAndSwap*: atomic compare-and-exchange; returns whether the swap
	// was performed (nonzero on success -- confirm against e32atomics.h).
	inline static TBool CompareAndSwap(TAny*& aPtr, TAny* aExpected, TAny* aNew)			/**< @internalComponent */
		{ return __e32_atomic_cas_ord_ptr(&aPtr, &aExpected, aNew); }
	inline static TBool CompareAndSwap8(TUint8& aPtr, TUint8 aExpected, TUint8 aNew)		/**< @internalComponent */
		{ return __e32_atomic_cas_ord8(&aPtr, (TUint8*)&aExpected, (TUint8)aNew); }
	inline static TBool CompareAndSwap16(TUint16& aPtr, TUint16 aExpected, TUint16 aNew)	/**< @internalComponent */
		{ return __e32_atomic_cas_ord16(&aPtr, (TUint16*)&aExpected, (TUint16)aNew); }
	inline static TUint32 SafeSwap(TUint32 aNewValue, TUint32& aPtr)						/**< @internalComponent */
		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
	inline static TUint SafeSwap(TUint aNewValue, TUint& aPtr)								/**< @internalComponent */
		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
	inline static TInt SafeSwap(TInt aNewValue, TInt& aPtr)									/**< @internalComponent */
		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
	inline static TBool CompareAndSwap(TUint32& aPtr, TUint32 aExpected, TUint32 aNew)		/**< @internalComponent */
		{ return __e32_atomic_cas_ord32(&aPtr, &aExpected, aNew); }
	inline static TBool CompareAndSwap(TUint& aPtr, TUint aExpected, TUint aNew)			/**< @internalComponent */
		{ return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
	inline static TBool CompareAndSwap(TInt& aPtr, TInt aExpected, TInt aNew)				/**< @internalComponent */
		{ return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }


	// Miscellaneous
	IMPORT_C static NThread* CurrentThread();
	IMPORT_C static TInt CurrentCpu();										/**< @internalComponent */
	IMPORT_C static TInt NumberOfCpus();									/**< @internalComponent */
	IMPORT_C static void LockSystem();
	IMPORT_C static void UnlockSystem();
	IMPORT_C static TBool FlashSystem();
	IMPORT_C static void WaitForAnyRequest();
	IMPORT_C static void Sleep(TUint32 aTime);
	IMPORT_C static void Exit();
	IMPORT_C static void DeferredExit();
	IMPORT_C static void YieldTimeslice();									/**< @internalComponent */
	IMPORT_C static void RotateReadyList(TInt aPriority);
	IMPORT_C static void RotateReadyList(TInt aPriority, TInt aCpu);		/**< @internalTechnology */
	IMPORT_C static void RecordIntLatency(TInt aLatency, TInt aIntMask);	/**< @internalTechnology */
	IMPORT_C static void RecordThreadLatency(TInt aLatency);				/**< @internalTechnology */
	IMPORT_C static TUint32 TickCount();
	IMPORT_C static TInt TickPeriod();
	IMPORT_C static TInt TimerTicks(TInt aMilliseconds);
	IMPORT_C static TInt TimesliceTicks(TUint32 aMicroseconds);				/**< @internalTechnology */
	IMPORT_C static TInt CurrentContext();	// returns a TContext value
	IMPORT_C static TUint32 FastCounter();
	IMPORT_C static TInt FastCounterFrequency();
	IMPORT_C static TUint64 Timestamp();
	IMPORT_C static TUint32 TimestampFrequency();
	static void Init0(TAny* aVariantData);
	static void Init(NThread* aThread, SNThreadCreateInfo& aInfo);
	static TInt BootAP(volatile SAPBootInfo* aInfo);	// boot a secondary (application) processor
	IMPORT_C static TBool KernelLocked(TInt aCount=0);						/**< @internalTechnology */
	IMPORT_C static NFastMutex* HeldFastMutex();							/**< @internalTechnology */
	static void Idle();
	IMPORT_C static SCpuIdleHandler* CpuIdleHandler();						/**< @internalTechnology */
	static void NotifyCrash(const TAny* a0, TInt a1);						/**< @internalTechnology */
	IMPORT_C static TBool Crashed();
	static TUint32 IdleGenerationCount();

	// Debugger support
	typedef void (*TRescheduleCallback)(NThread*);
	IMPORT_C static void SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd);
	IMPORT_C static void InsertSchedulerHooks();
	IMPORT_C static void RemoveSchedulerHooks();
	IMPORT_C static void SetRescheduleCallback(TRescheduleCallback aCallback);

	// Interrupts
	// Trigger mode / behaviour flags for InterruptInit().
	enum TIrqInitFlags
		{
		EIrqInit_FallingEdge=0,
		EIrqInit_RisingEdge=2,
		EIrqInit_LevelLow=1,
		EIrqInit_LevelHigh=3,
		EIrqInit_Shared=0x10,
		EIrqInit_Count=0x20,
		};

	// Flags for InterruptBind().
	enum TIrqBindFlags
		{
		EIrqBind_Raw=1,
		EIrqBind_Count=2,
		EIrqBind_Exclusive=4,
		EIrqBind_Tied=8
		};

	// Layout of the TInt interrupt identifiers used by the Interrupt* APIs.
	enum TIrqIdBits
		{
		EIrqIndexMask = 0x0000ffff,	// bottom 16 bits is IRQ number if top 16 bits all zero
									// otherwise is IRQ handler index
		EIrqCookieMask = 0x7fff0000,
		EIrqCookieShift = 16
		};

	static void InterruptInit0();
	IMPORT_C static TInt InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt=0);
	IMPORT_C static TInt InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied);
	IMPORT_C static TInt InterruptUnbind(TInt aId);
	IMPORT_C static TInt InterruptEnable(TInt aId);
	IMPORT_C static TInt InterruptDisable(TInt aId);
	IMPORT_C static TInt InterruptClear(TInt aId);
	IMPORT_C static TInt InterruptSetPriority(TInt aId, TInt aPri);
	IMPORT_C static TInt InterruptSetCpuMask(TInt aId, TUint32 aMask);
	IMPORT_C static void Interrupt(TInt aIrqNo);
	};
  1.1187 +
  1.1188 +
  1.1189 +/** Create a fast semaphore
  1.1190 +
  1.1191 +	@publishedPartner
  1.1192 +	@prototype
  1.1193 +*/
  1.1194 +inline NFastSemaphore::NFastSemaphore(NThreadBase* aThread)
  1.1195 +	:	iCount(0),
  1.1196 +		iOwningThread(aThread ? aThread : (NThreadBase*)NKern::CurrentThread())
  1.1197 +	{
  1.1198 +	}
  1.1199 +
  1.1200 +
class TGenericIPI;

/**
Handler function executed on each target CPU of a generic IPI.

@internalComponent
*/
typedef void (*TGenericIPIFn)(TGenericIPI*);

/**
A generic inter-processor interrupt (IPI) request: runs a supplied function on
a chosen set of CPUs.

@internalComponent
*/
class TGenericIPI : public SDblQueLink
	{
public:
	void Queue(TGenericIPIFn aFunc, TUint32 aCpuMask);	// run aFunc on the CPUs selected by aCpuMask
	void QueueAll(TGenericIPIFn aFunc);					// run aFunc on all CPUs
	void QueueAllOther(TGenericIPIFn aFunc);			// run aFunc on all CPUs other than the current one
	void WaitEntry();									// wait for target CPUs to enter the handler -- confirm exact semantics in implementation
	void WaitCompletion();								// wait for target CPUs to finish the handler -- confirm exact semantics in implementation
public:
	TGenericIPIFn			iFunc;		// function to execute on each target CPU
	volatile TUint32		iCpusIn;	// tracks CPUs which have entered the handler (mask or count -- TODO confirm)
	volatile TUint32		iCpusOut;	// tracks CPUs which have yet to complete the handler (mask or count -- TODO confirm)
	};
  1.1224 +
/**
An IPI used to temporarily stop the other CPUs (they are released again with
ReleaseCPUs()).

@internalComponent
*/
class TStopIPI : public TGenericIPI
	{
public:
	void StopCPUs();				// halt the other CPUs
	void ReleaseCPUs();				// let the stopped CPUs resume
	static void Isr(TGenericIPI*);	// handler run on each stopped CPU; presumably spins on iFlag -- confirm
public:
	volatile TInt iFlag;			// release flag observed by Isr() -- TODO confirm exact protocol
	};
  1.1237 +
  1.1238 +#include <ncern.h>
  1.1239 +#endif