os/kernelhwsrv/kernel/eka/include/nkernsmp/nk_priv.h
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/include/nkernsmp/nk_priv.h	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,882 @@
     1.4 +// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32\include\nkernsmp\nk_priv.h
    1.18 +// 
    1.19 +// WARNING: This file contains some APIs which are internal and are subject
    1.20 +//          to change without notice. Such APIs should therefore not be used
    1.21 +//          outside the Kernel and Hardware Services package.
    1.22 +//
    1.23 +
    1.24 +#ifndef __NK_PRIV_H__
    1.25 +#define __NK_PRIV_H__
    1.26 +#include <cpudefs.h>
    1.27 +#include <nkern.h>
    1.28 +
    1.29 +#define __USE_BTRACE_LOCK__
    1.30 +
    1.31 +class Monitor;
    1.32 +
    1.33 +/********************************************
    1.34 + * Schedulable = thread or thread group
    1.35 + ********************************************/
    1.36 +
    1.37 +/**
    1.38 +@publishedPartner
    1.39 +@prototype
    1.40 +
    1.41 +Base class for a nanokernel thread or group
    1.42 +*/
    1.43 +class NThreadGroup;
class NSchedulable : public TPriListLink
	{
public:
	// Encoding of the iReady flag (aliased onto iSpare1, see the commented-out
	// iReady member below): EReadyOffset set when on a ready list, low bits
	// hold the CPU number or EReadyGroup.
	enum
		{
		EReadyGroup=1,
		EReadyCpuMask=0x7f,
		EReadyOffset=0x80,
		};

	// Mode bits accepted by ReadyT(aMode).
	enum NReadyFlags
		{
		ENewTimeslice=1,
		EPreferSameCpu=2,
		EUnPause=4,
		};

	// Bit-field layout of the iEventState word (see member comment below).
	enum NEventState
		{
		EEventCountShift=16u,
		EEventCountMask=0xffff0000u,
		EEventCountInc=0x10000u,
		EEventCpuShift=0u,
		EEventCpuMask=0x1fu,
		EThreadCpuShift=8u,
		EThreadCpuMask=0x1f00u,
		EDeferredReady=0x4000u,
		EEventParent=0x8000u,
		};
public:
	NSchedulable();
	void AcqSLock();							// acquire this object's spin lock (iSSpinLock)
	void RelSLock();							// release this object's spin lock
	void LAcqSLock();							// lock variant of AcqSLock() - see implementation for exact semantics
	void RelSLockU();							// unlock variant of RelSLock() - see implementation for exact semantics
	void ReadyT(TUint aMode);					// make ready, assumes lock held; aMode is a combination of NReadyFlags
	TInt BeginTiedEvent();
	void EndTiedEvent();
	TInt AddTiedEvent(NEventHandler* aEvent);	// add aEvent to iEvents
	TBool TiedEventReadyInterlock(TInt aCpu);
	void UnPauseT();							// decrement pause count and make ready if necessary
	static void DeferredReadyIDfcFn(TAny*);		// IDFC callback used for deferred ready (see i_IDfcMem)
	void DetachTiedEvents();
public:
	// Classification based on iParent (see iParent member comment below).
	inline TBool IsGroup()			{return !iParent;}
	inline TBool IsLoneThread()		{return iParent==this;}
	inline TBool IsGroupThread()	{return iParent && iParent!=this;}
public:
//	TUint8				iReady;					/**< @internalComponent */	// flag indicating thread on ready list = cpu number | EReadyOffset
//	TUint8				iCurrent;				/**< @internalComponent */	// flag indicating thread is running
//	TUint8				iLastCpu;				/**< @internalComponent */	// CPU on which this thread last ran
	TUint8				iPauseCount;			/**< @internalComponent */	// count of externally requested pauses extending a voluntary wait
	TUint8				iSuspended;				/**< @internalComponent */	// flag indicating active external suspend (Not used for groups)
	TUint8				iNSchedulableSpare1;	/**< @internalComponent */
	TUint8				iNSchedulableSpare2;	/**< @internalComponent */

	TUint8				iCpuChange;				/**< @internalComponent */	// flag showing CPU migration outstanding
	TUint8				iStopping;				/**< @internalComponent */	// thread is exiting, thread group is being destroyed
	TUint16				iFreezeCpu;				/**< @internalComponent */	// flag set if CPU frozen - count for groups
	NSchedulable*		iParent;				/**< @internalComponent */	// Pointer to group containing thread, =this for normal thread, =0 for group

	TUint32				iCpuAffinity;			/**< @internalComponent */	// bitmask if NTHREADBASE_CPU_AFFINITY_MASK set, else a CPU number (see CheckCpuAgainstAffinity)
	volatile TUint32	iEventState;			/**< @internalComponent */	// bits 16-31=count, 0-4=event CPU, 8-12=thread CPU, 14=defer, 15=parent (per NEventState)

	TSpinLock			iSSpinLock;				/**< @internalComponent */	// must stay 8-byte aligned (asserted after the class)

	SDblQue				iEvents;				/**< @internalComponent */	// doubly-linked list of tied events

	TUint32				i_IDfcMem[sizeof(TDfc)/sizeof(TUint32)];	/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes
//	TDfc				iDeferredReadyIDfc;		/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes

	// Run counter, accessible as one 64-bit value or two 32-bit words.
	union
		{
		TUint64			iRunCount64;
		TUint32			iRunCount32[2];
		};
	union
		{
		TUint64			iTotalCpuTime64;		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
		TUint32			iTotalCpuTime32[2];		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
		};
	};
   1.126 +
   1.127 +__ASSERT_COMPILE(!(_FOFF(NSchedulable,iSSpinLock)&7));
   1.128 +__ASSERT_COMPILE(!(_FOFF(NSchedulable,iRunCount64)&7));
   1.129 +__ASSERT_COMPILE(!(_FOFF(NSchedulable,iTotalCpuTime64)&7));
   1.130 +__ASSERT_COMPILE(!(sizeof(NSchedulable)&7));
   1.131 +
   1.132 +
   1.133 +/**
   1.134 +@internalComponent
   1.135 +*/
   1.136 +inline TBool TDfc::IsValid()
   1.137 +	{
   1.138 +	if (iHType < KNumDfcPriorities)
   1.139 +		return TRUE;
   1.140 +	if (iHType != EEventHandlerIDFC)
   1.141 +		return FALSE;
   1.142 +	return !iTied || !iTied->iStopping;
   1.143 +	}
   1.144 +
   1.145 +/********************************************
   1.146 + * Thread
   1.147 + ********************************************/
   1.148 +
   1.149 +/**
   1.150 +@internalComponent
   1.151 +*/
class NThreadWaitState
	{
private:
	// Bits held in iWtC.iWtStFlags describing the thread's wait status.
	enum TWtStFlags
		{
		EWtStWaitPending		=0x01u,		// thread is about to wait
		EWtStWaitActive			=0x02u,		// thread is actually blocked
		EWtStTimeout			=0x04u,		// timeout is active on this wait
		EWtStObstructed			=0x08u,		// wait is due to obstruction (e.g. mutex) rather than lack of work to do
		EWtStDead				=0x80u,		// thread is dead
		};
private:
	NThreadWaitState();
	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj);
	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout);	// overload arming a timeout as well
	void SetDead(TDfc* aKillDfc);			// mark the thread dead, recording the kill DFC
	void CancelWait();
	TInt DoWait();
	static void TimerExpired(TAny*);		// callback for iTimer expiry
	TInt UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue);
	TUint32 ReleaseT(TAny*& aWaitObj, TInt aReturnValue);
	void CancelTimerT();
private:
	inline NThreadBase* Thread();			// owning thread (recovered via _LOFF)
	// Note: the following predicates also report TRUE for a dead thread.
	inline TBool WaitPending()
		{ return iWtC.iWtStFlags & (EWtStWaitPending|EWtStDead); }
	inline TBool ThreadIsBlocked()
		{ return iWtC.iWtStFlags & (EWtStWaitActive|EWtStDead); }
	inline TBool ThreadIsDead()
		{ return iWtC.iWtStFlags & EWtStDead; }
private:
	// Wait-state record; overlaid below so the whole state can be read/written
	// as two 32-bit words or a single 64-bit word.
	struct S
		{
		volatile TUint8			iWtStFlags;	// TWtStFlags bits
		volatile TUint8			iWtObjType;	// type of wait object - presumably an NThreadBase::NThreadWaitType; confirm in implementation
		volatile TUint8			iWtStSpare1;
		volatile TUint8			iWtStSpare2;
		// Interpretation depends on state: object waited on, completion
		// value, or DFC to run on kill (set via SetDead).
		union
			{
			TAny* volatile		iWtObj;
			volatile TInt		iRetVal;
			TDfc* volatile		iKillDfc;
			};
		};
	union
		{
		S						iWtC;
		volatile TUint32		iWtSt32[2];
		volatile TUint64		iWtSt64;
		};
	NTimer						iTimer;		// wait timeout timer (see TimerExpired)
private:
	friend class NSchedulable;
	friend class NThreadBase;
	friend class NThread;
	friend class TScheduler;
	friend class TSubScheduler;
	friend class TDfc;
	friend class TDfcQue;
	friend class NFastSemaphore;
	friend class NFastMutex;
	friend class NTimer;
	friend class NTimerQ;
	friend class NKern;
	friend class Monitor;
	friend class NKTest;
	};
   1.219 +
   1.220 +/**
   1.221 +@publishedPartner
   1.222 +@prototype
   1.223 +
   1.224 +Base class for a nanokernel thread.
   1.225 +*/
   1.226 +class TSubScheduler;
class NThreadBase : public NSchedulable
	{
public:
    /**
    Defines the possible types of wait object
    */
	enum NThreadWaitType
		{
		EWaitNone,
		EWaitFastSemaphore,
		EWaitFastMutex,
		EWaitSleep,
		EWaitBlocked,
		EWaitDfc,
		
		ENumWaitTypes
		};

		
	/**
	Special values for iCsFunction (see iCsFunction member comment below).
	@internalComponent
	*/
	enum NThreadCSFunction
		{
		ECSExitPending=-1,
		ECSExitInProgress=-2,
		ECSDivertPending=-3,
		};

	/**
	Operation codes for timeout handling (see TimerExpired).
	@internalComponent
	*/
	enum NThreadTimeoutOp
		{
		ETimeoutPreamble=0,
		ETimeoutPostamble=1,
		ETimeoutSpurious=2,
		};
public:
	NThreadBase();
	TInt Create(SNThreadCreateInfo& anInfo,	TBool aInitial);	// aInitial: TRUE for the initial thread on a CPU
	void UnReadyT();							// remove from ready list; 'T' suffix convention: caller holds the relevant lock
	TBool SuspendOrKill(TInt aCount);
	TBool DoSuspendOrKillT(TInt aCount, TSubScheduler* aS);
	TBool CancelTimerT();
	void DoReleaseT(TInt aReturnCode, TUint aMode);
	TBool CheckFastMutexDefer();
	void DoCsFunctionT();						// carry out the action recorded in iCsFunction
	TBool Resume(TBool aForce);
	IMPORT_C TBool Suspend(TInt aCount);		/**< @internalComponent */
	IMPORT_C TBool Resume();					/**< @internalComponent */
	IMPORT_C TBool ForceResume();				/**< @internalComponent */
	IMPORT_C void Release(TInt aReturnCode, TUint aMode);	/**< @internalComponent */
	IMPORT_C void RequestSignal();				/**< @internalComponent */
	IMPORT_C void SetPriority(TInt aPriority);	/**< @internalComponent */
	void SetMutexPriority(NFastMutex* aMutex);
	void LoseInheritedPriorityT();
	void ChangeReadyThreadPriority();
	TUint32 SetCpuAffinity(TUint32 aAffinity);	// returns previous affinity
	TBool TiedEventLeaveInterlock();
	TBool TiedEventJoinInterlock();
	IMPORT_C void Kill();						/**< @internalComponent */
	void Exit();
	// hooks for platform-specific code
	void OnKill(); 
	void OnExit();
public:
	static void TimerExpired(TAny* aPtr);

	/** Dispatch aOp to the thread's registered state handler.
	@internalComponent */
	inline void UnknownState(TInt aOp, TInt aParam)
		{ (*iHandlers->iStateHandler)((NThread*)this,aOp,aParam); }

	/** @internalComponent */
	inline TUint8 Attributes()
		{ return i_ThrdAttr; }

	/** Atomically replace the attribute byte, returning the old value.
	@internalComponent */
	inline TUint8 SetAttributes(TUint8 aNewAtt)
		{ return __e32_atomic_swp_ord8(&i_ThrdAttr, aNewAtt); }

	/** Atomically clear aClearMask and set aSetMask, returning the old value.
	@internalComponent */
	inline TUint8 ModifyAttributes(TUint8 aClearMask, TUint8 aSetMask)
		{ return __e32_atomic_axo_ord8(&i_ThrdAttr, (TUint8)~(aClearMask|aSetMask), aSetMask); }

	/** @internalComponent */
	inline void SetAddressSpace(TAny* a)
		{ iAddressSpace=a; }

	/** @internalComponent */
	inline void SetExtraContext(TAny* a, TInt aSize)
		{ iExtraContext = a; iExtraContextSize = aSize; }

	/** @internalTechnology */
	inline TBool IsDead()
		{ return iWaitState.ThreadIsDead(); }
public:
	TPriListLink		iWaitLink;				/**< @internalComponent */	// used to link thread into a wait queue
//	TUint8				iBasePri;				/**< @internalComponent */	// priority with no fast mutex held
//	TUint8				iMutexPri;				/**< @internalComponent */	// priority from held fast mutex
//	TUint8				iInitial;				/**< @internalComponent */	// TRUE if this is an initial thread
	TUint8				iLinkedObjType;			// type of object in iLinkedObj
	TUint8				i_ThrdAttr;				/**< @internalComponent */	// attribute byte, accessed atomically via the inline functions above
	TUint8				iNThreadBaseSpare10;
	TUint8				iFastMutexDefer;		/**< @internalComponent */
	NFastSemaphore		iRequestSemaphore;		/**< @internalComponent */

	TInt				iTime;					/**< @internalComponent */	// time remaining, 0 if expired
	TInt				iTimeslice;				/**< @internalComponent */	// timeslice for this thread, -ve = no timeslicing

	TLinAddr			iSavedSP;				/**< @internalComponent */
	TAny*				iAddressSpace;			/**< @internalComponent */

	NFastMutex* volatile iHeldFastMutex;		/**< @internalComponent */	// fast mutex held by this thread
	TUserModeCallback* volatile iUserModeCallbacks;	/**< @internalComponent */	// Head of singly-linked list of callbacks
	TAny* volatile		iLinkedObj;				/**< @internalComponent */	// object to which this thread is linked
	NThreadGroup*		iNewParent;				/**< @internalComponent */	// group to join

	const SFastExecTable* iFastExecTable;		/**< @internalComponent */
	const SSlowExecEntry* iSlowExecTable;		/**< @internalComponent */	// points to first entry iEntries[0]

	volatile TInt		iCsCount;				/**< @internalComponent */	// critical section count
	volatile TInt		iCsFunction;			/**< @internalComponent */	// what to do on leaving CS: +n=suspend n times, 0=nothing, -1=exit

	NThreadWaitState	iWaitState;				/**< @internalComponent */

	const SNThreadHandlers* iHandlers;			/**< @internalComponent */	// additional thread event handlers
	TInt				iSuspendCount;			/**< @internalComponent */	// -how many times we have been suspended

	TLinAddr			iStackBase;				/**< @internalComponent */
	TInt				iStackSize;				/**< @internalComponent */

	TAny*				iExtraContext;			/**< @internalComponent */	// parent FPSCR value (iExtraContextSize == -1), coprocessor context (iExtraContextSize > 0) or NULL
	TInt				iExtraContextSize;		/**< @internalComponent */	// +ve=dynamically allocated, 0=none, -1=iExtraContext stores parent FPSCR value

	TUint32				iNThreadBaseSpare6;		/**< @internalComponent */	// spare to allow growth while preserving BC
	TUint32				iNThreadBaseSpare7;		/**< @internalComponent */	// spare to allow growth while preserving BC
	TUint32				iNThreadBaseSpare8;		/**< @internalComponent */	// spare to allow growth while preserving BC
	TUint32				iNThreadBaseSpare9;		/**< @internalComponent */	// spare to allow growth while preserving BC

	// For EMI support - HOPEFULLY THIS CAN DIE
	TUint32	iTag;							/**< @internalComponent */	// User defined set of bits which is ANDed with a mask when the thread is scheduled, and indicates if a DFC should be scheduled.
	TAny* iVemsData;						/**< @internalComponent */	// This pointer can be used by any VEMS to store any data associated with the thread.  This data must be clean up before the Thread Exit Monitor completes.
	};
   1.372 +
   1.373 +__ASSERT_COMPILE(!(_FOFF(NThreadBase,iWaitLink)&7));
   1.374 +__ASSERT_COMPILE(!(sizeof(NThreadBase)&7));
   1.375 +
#ifdef __INCLUDE_NTHREADBASE_DEFINES__
// Aliases mapping the commented-out logical fields of NSchedulable/NThreadBase
// onto the spare bytes inherited from TPriListLink and iWaitLink, so the
// binary layout stays unchanged.
#define	iReady				iSpare1				/**< @internalComponent */
#define	iCurrent			iSpare2				/**< @internalComponent */
#define	iLastCpu			iSpare3				/**< @internalComponent */

#define iBasePri			iWaitLink.iSpare1	/**< @internalComponent */
#define	iMutexPri			iWaitLink.iSpare2	/**< @internalComponent */
#define	i_NThread_Initial	iWaitLink.iSpare3	/**< @internalComponent */

#endif

/** Always-available alias for the base priority byte (same slot as iBasePri).
@internalComponent */
#define	i_NThread_BasePri	iWaitLink.iSpare1

/** Top bit of a CPU affinity word: set => the word is a bitmask of permitted
CPUs; clear => it names a single CPU (see CheckCpuAgainstAffinity below).
@internalComponent */
#define	NTHREADBASE_CPU_AFFINITY_MASK	0x80000000
   1.392 +
   1.393 +/** @internalComponent */
inline NThreadBase* NThreadWaitState::Thread()
	{ return _LOFF(this, NThreadBase, iWaitState); }	// recover the owning NThreadBase from its embedded iWaitState member
   1.396 +
   1.397 +/********************************************
   1.398 + * Thread group
   1.399 + ********************************************/
   1.400 +
   1.401 +/**
   1.402 +@publishedPartner
   1.403 +@prototype
   1.404 +
   1.405 +Base class for a nanokernel thread or group
   1.406 +*/
class NThreadGroup : public NSchedulable
	{
public:
	NThreadGroup();
public:
	TInt iThreadCount;										/**< @internalComponent */	// number of threads currently in this group
	TPriList<NThreadBase, KNumPriorities> iNThreadList;		/**< @internalComponent */	// priority-ordered list of member threads
	};
   1.415 +
   1.416 +/********************************************
   1.417 + * Scheduler
   1.418 + ********************************************/
   1.419 +
   1.420 +/**
   1.421 +@internalComponent
   1.422 +*/
   1.423 +class TScheduler;
   1.424 +class NThread;
   1.425 +class NIrqHandler;
// Per-CPU scheduler state (one instance per CPU - see TScheduler::iSub).
// Layout is significant: several members are alignment-checked and the whole
// object is padded to exactly 512 bytes (asserted after the class).
class TSubScheduler : public TPriListBase
	{
public:
	TSubScheduler();
	void QueueDfcs();							// run/queue pending IDFCs/DFCs on this CPU
	void RotateReadyList(TInt aPriority);
	NThread* SelectNextThread();				// pick the next thread to run on this CPU
	TBool QueueEvent(NEventHandler* aEvent);
	void QueueEventAndKick(NEventHandler* aEvent);
	void SaveTimesliceTimer(NThreadBase* aThread);
	void UpdateThreadTimes(NThreadBase* aOld, NThreadBase* aNew);
private:
	SDblQueLink*	iExtraQueues[KNumPriorities-1];	// additional per-priority queue pointers used by the priority list implementation
public:
	TSpinLock		iExIDfcLock;				// lock to protect exogenous IDFC queue

	SDblQue			iExIDfcs;					// list of pending exogenous IDFCs (i.e. ones punted over from another CPU)

	SDblQue			iDfcs;						// normal IDFC/DFC pending queue (only accessed by this CPU)

	TDfc* volatile	iCurrentIDFC;				// pointer to IDFC currently running on this CPU
	NThread*		iCurrentThread;				// the thread currently running on this CPU

	TUint32			iCpuNum;					// this CPU's number
	TUint32			iCpuMask;					// bit mask for this CPU

	TSpinLock		iReadyListLock;

	volatile TUint8	iRescheduleNeededFlag;		// TRUE if a thread reschedule is pending
	TUint8			iSubSchedulerSBZ1;			// always zero
	volatile TUint8	iDfcPendingFlag;			// TRUE if a normal IDFC is pending
	volatile TUint8	iExIDfcPendingFlag;			// TRUE if an exogenous IDFC is pending
	TInt			iKernLockCount;				// how many times the current CPU has locked the kernel

	TUint8			iInIDFC;					// TRUE if IDFCs are currently being run on this CPU
	volatile TUint8	iEventHandlersPending;		// TRUE if an event handler is pending on this CPU
	TUint8			iSubSchedulerSpare4;
	TUint8			iSubSchedulerSpare5;
	TAny*			iAddressSpace;

	TUint32			iReschedIPIs;
	TScheduler*		iScheduler;					// back pointer to the global scheduler

	union
		{
		TUint64		iLastTimestamp64;			// NKern::Timestamp() value at last reschedule or timestamp sync
		TUint32		iLastTimestamp32[2];
		};
	union
		{
		TUint64		iReschedCount64;			// count of reschedules on this CPU (64-bit, or two 32-bit words)
		TUint32		iReschedCount32[2];
		};

	TAny*			iExtras[24];				// Space for platform-specific extras

	TGenericIPI*	iNextIPI;					// next generic IPI to run on this CPU
	NThread*		iInitialThread;				// Initial (idle) thread on this CPU

	TSpinLock		iEventHandlerLock;			// lock to protect event handler queue

	SDblQue			iEventHandlers;				// queue of pending event handlers on this CPU

	TUint64			iSpinLockOrderCheck;		// bitmask showing which spinlock orders currently held

	TUint32			iSubSchedulerPadding[8];	// pads the object to the asserted 512-byte size
	};
   1.493 +
   1.494 +__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iExIDfcLock)&7));
   1.495 +__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iEventHandlerLock)&7));
   1.496 +__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReadyListLock)&7));
   1.497 +__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iLastTimestamp64)&7));
   1.498 +__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReschedCount64)&7));
   1.499 +__ASSERT_COMPILE(sizeof(TSubScheduler)==512);	// make it a nice power of 2 size for easy indexing
   1.500 +
   1.501 +/**
   1.502 +@internalComponent
   1.503 +*/
// Global scheduler state shared by all CPUs; per-CPU state lives in the
// TSubScheduler instances pointed to by iSub. Padded to exactly 512 bytes
// (asserted after the class).
class TScheduler
	{
public:
	TScheduler();
	static void Reschedule();
	IMPORT_C static TScheduler* Ptr();			// the single global instance (TheScheduler)
	inline void SetProcessHandler(TLinAddr aHandler) {iProcessHandler=aHandler;}
public:
	TLinAddr		iMonitorExceptionHandler;	// handler address - installed by platform/monitor code
	TLinAddr		iProcessHandler;			// handler address - set via SetProcessHandler()

	TLinAddr		iRescheduleHook;			// optional hook address invoked on reschedule - confirm usage in implementation
	TUint32			iActiveCpus1;				// bit n set if CPU n is accepting unlocked threads

	TUint32			iActiveCpus2;				// bit n set if CPU n is accepting generic IPIs
	TInt			iNumCpus;					// number of CPUs under the kernel's control

	TSubScheduler*	iSub[KMaxCpus];				// one subscheduler per CPU

	TAny*			iExtras[24];				// Space for platform-specific extras

	NFastMutex		iLock;						// the 'system lock' fast mutex

	TSpinLock		iIdleSpinLock;				// lock to protect list of DFCs to be run on idle

	SDblQue			iIdleDfcs;					// list of DFCs to run when all CPUs go idle

	TUint32			iCpusNotIdle;				// bitmask - Bit n set => CPU n is not idle
	TUint8			iIdleGeneration;			// Toggles between 0 and 1 each time iIdleDfcs list is spilled to a CPU IDFC queue
	TUint8			iIdleSpillCpu;				// Which CPU last spilled the iIdleDfcs list to its IDFC queue
	TUint8			iTSchedulerSpare1;
	TUint8			iTSchedulerSpare2;

	TUint32			iIdleGenerationCount;		// Incremented each time iIdleDfcs list is spilled to a CPU IDFC queue
	TUint32			i_Scheduler_Padding[3];		// pads the object to the asserted 512-byte size

	// For EMI support - HOPEFULLY THIS CAN DIE
	NThread* iSigma;	
	TDfc* iEmiDfc;
	TUint32 iEmiMask;
	TUint32 iEmiState;
	TUint32 iEmiDfcTrigger;
	TBool iLogging;
	TAny* iBufferStart;
	TAny* iBufferEnd;
	TAny* iBufferTail;
	TAny* iBufferHead;
	};
   1.552 +
   1.553 +__ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleSpinLock)&7));
   1.554 +__ASSERT_COMPILE(sizeof(TScheduler)==512);
   1.555 +
   1.556 +extern TScheduler TheScheduler;
   1.557 +extern TSubScheduler TheSubSchedulers[KMaxCpus];
   1.558 +
#ifdef __USE_BTRACE_LOCK__
// Spin lock taken around BTrace output; held with interrupts disabled
// (LockIrqSave/UnlockIrqRestore pair below).
extern TSpinLock BTraceLock;

// Note: __ACQUIRE_BTRACE_LOCK declares a local (_btrace_irq) that the matching
// __RELEASE_BTRACE_LOCK consumes - both must appear in the same scope.
#define	__ACQUIRE_BTRACE_LOCK()			TInt _btrace_irq = BTraceLock.LockIrqSave()
#define	__RELEASE_BTRACE_LOCK()			BTraceLock.UnlockIrqRestore(_btrace_irq)

#else

// BTrace lock disabled: both macros compile away to nothing.
#define	__ACQUIRE_BTRACE_LOCK()
#define	__RELEASE_BTRACE_LOCK()

#endif
   1.571 +
   1.572 +/**
   1.573 +@internalComponent
   1.574 +*/
   1.575 +extern "C" TSubScheduler& SubScheduler();
   1.576 +
   1.577 +/**
   1.578 +@internalComponent
   1.579 +*/
   1.580 +extern "C" void send_resched_ipis(TUint32 aMask);
   1.581 +
   1.582 +/**
   1.583 +@internalComponent
   1.584 +*/
   1.585 +extern "C" void send_resched_ipi(TInt aCpu);
   1.586 +
   1.587 +/**
   1.588 +@internalComponent
   1.589 +*/
   1.590 +extern "C" void send_resched_ipi_and_wait(TInt aCpu);
   1.591 +
   1.592 +
   1.593 +#include <nk_plat.h>
   1.594 +
   1.595 +/**
   1.596 +Call with kernel locked
   1.597 +
   1.598 +@internalComponent
   1.599 +*/
   1.600 +inline void RescheduleNeeded()
   1.601 +	{ SubScheduler().iRescheduleNeededFlag = 1; }
   1.602 +
   1.603 +
   1.604 +/**
   1.605 +@internalComponent
   1.606 +*/
   1.607 +#define	NCurrentThread()	NKern::CurrentThread()
   1.608 +
   1.609 +/** Optimised current thread function which can only be called from places where
   1.610 +	CPU migration is not possible - i.e. with interrupts disabled or preemption
   1.611 +	disabled.
   1.612 +
   1.613 +@internalComponent
   1.614 +*/
   1.615 +extern "C" NThread* NCurrentThreadL();
   1.616 +
   1.617 +/** @internalComponent */
   1.618 +inline TBool CheckCpuAgainstAffinity(TInt aCpu, TUint32 aAffinity)
   1.619 +	{
   1.620 +	if (aAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
   1.621 +		return aAffinity & (1<<aCpu);
   1.622 +	return aAffinity==(TUint32)aCpu;
   1.623 +	}
   1.624 +
   1.625 +/**
   1.626 +@internalComponent
   1.627 +*/
   1.628 +#define __NK_ASSERT_UNLOCKED	__NK_ASSERT_DEBUG(!NKern::KernelLocked())
   1.629 +
   1.630 +/**
   1.631 +@internalComponent
   1.632 +*/
   1.633 +#define __NK_ASSERT_LOCKED		__NK_ASSERT_DEBUG(NKern::KernelLocked())
   1.634 +
   1.635 +#ifdef _DEBUG
   1.636 +/**
   1.637 +@publishedPartner
   1.638 +@released
   1.639 +*/
   1.640 +#define __ASSERT_NO_FAST_MUTEX	__NK_ASSERT_DEBUG(!NKern::HeldFastMutex());
   1.641 +
   1.642 +/**
   1.643 +@publishedPartner
   1.644 +@released
   1.645 +*/
   1.646 +#define __ASSERT_FAST_MUTEX(m)	__NK_ASSERT_DEBUG((m)->HeldByCurrentThread());
   1.647 +
   1.648 +/**
   1.649 +@publishedPartner
   1.650 +@released
   1.651 +*/
   1.652 +#define __ASSERT_SYSTEM_LOCK	__NK_ASSERT_DEBUG(TScheduler::Ptr()->iLock.HeldByCurrentThread());
   1.653 +
   1.654 +#define __ASSERT_NOT_ISR		__NK_ASSERT_DEBUG(NKern::CurrentContext()!=NKern::EInterrupt)
   1.655 +
   1.656 +#else
   1.657 +#define __ASSERT_NO_FAST_MUTEX
   1.658 +#define __ASSERT_FAST_MUTEX(m)
   1.659 +#define	__ASSERT_SYSTEM_LOCK
   1.660 +#define __ASSERT_NOT_ISR
   1.661 +#endif
   1.662 +
   1.663 +/********************************************
   1.664 + * System timer queue
   1.665 + ********************************************/
   1.666 +
   1.667 +/**
   1.668 +@publishedPartner
   1.669 +@prototype
   1.670 +*/
// Nanokernel timer queue, driven by Tick() - the system tick dispatcher for
// NTimer objects.
class NTimerQ
	{
	friend class NTimer;
public:
	typedef void (*TDebugFn)(TAny* aPtr, TInt aPos);	/**< @internalComponent */
	enum { ETimerQMask=31, ENumTimerQueues=32 };		/**< @internalComponent */	// these are not easily modifiable

	/** A single timer bucket: timers completed in ISR context vs via DFC.
	@internalComponent */
	struct STimerQ
		{
		SDblQue iIntQ;
		SDblQue iDfcQ;
		};
public:
	NTimerQ();
	static void Init1(TInt aTickPeriod);		// first-phase init: record the tick period
	static void Init3(TDfcQue* aDfcQ);			// third-phase init: attach the DFC queue
	IMPORT_C static TAny* TimerAddress();
	IMPORT_C void Tick();						// advance the queue by one tick
	IMPORT_C static TInt IdleTime();
	IMPORT_C static void Advance(TInt aTicks);
private:
	static void DfcFn(TAny* aPtr);				// static trampoline to Dfc()
	void Dfc();
	void Add(NTimer* aTimer);
	void AddFinal(NTimer* aTimer);
public:
	STimerQ			iTickQ[ENumTimerQueues];	/**< @internalComponent */	// NOTE: the order of member data is important
	TUint32			iPresent;					/**< @internalComponent */	// The assembler code relies on it
	TUint32			iMsCount;					/**< @internalComponent */	// tick counter (returned by NTickCount())
	SDblQue			iHoldingQ;					/**< @internalComponent */
	SDblQue			iOrderedQ;					/**< @internalComponent */
	SDblQue			iCompletedQ;				/**< @internalComponent */
	TDfc			iDfc;						/**< @internalComponent */
	TUint8			iTransferringCancelled;		/**< @internalComponent */
	TUint8			iCriticalCancelled;			/**< @internalComponent */
	TUint8			iPad1;						/**< @internalComponent */
	TUint8			iPad2;						/**< @internalComponent */
	TDebugFn		iDebugFn;					/**< @internalComponent */
	TAny*			iDebugPtr;					/**< @internalComponent */
	TInt			iTickPeriod;				/**< @internalComponent */	// in microseconds

	/**
	This member is intended for use by ASSP/variant interrupt code as a convenient
	location to store rounding error information where hardware interrupts are not
	exactly one millisecond. The Symbian kernel does not make any use of this member.
	@publishedPartner
	@prototype
	*/
	TInt			iRounding;
	TInt			iDfcCompleteCount;			/**< @internalComponent */
	TSpinLock		iTimerSpinLock;				/**< @internalComponent */	// must stay 8-byte aligned (asserted after the class)
	};
   1.724 +
   1.725 +__ASSERT_COMPILE(!(_FOFF(NTimerQ,iTimerSpinLock)&7));
   1.726 +
   1.727 +
   1.728 +GLREF_D NTimerQ TheTimerQ;
   1.729 +
   1.730 +/**
   1.731 +@internalComponent
   1.732 +*/
inline TUint32 NTickCount()
	{return TheTimerQ.iMsCount;}	// current nanokernel tick count (NTimerQ::iMsCount)
   1.735 +
   1.736 +/**
   1.737 +@internalComponent
   1.738 +*/
inline TInt NTickPeriod()
	{return TheTimerQ.iTickPeriod;}	// tick period in microseconds (NTimerQ::iTickPeriod)
   1.741 +
   1.742 +
   1.743 +extern "C" {
   1.744 +/**
   1.745 +@internalComponent
   1.746 +*/
   1.747 +extern void NKCrashHandler(TInt aPhase, const TAny* a0, TInt a1);
   1.748 +
   1.749 +/**
   1.750 +@internalComponent
   1.751 +*/
   1.752 +extern TUint32 CrashState;
   1.753 +}
   1.754 +
   1.755 +
   1.756 +/**
   1.757 +@internalComponent
   1.758 +*/
// Queue of generic inter-processor interrupts with its guarding spin lock.
class TGenIPIList : public SDblQue
	{
public:
	TGenIPIList();
public:
	TSpinLock			iGenIPILock;	// guards this list (by name; confirm usage in implementation)
	};
   1.766 +
   1.767 +/**
   1.768 +@internalComponent
   1.769 +*/
// IPI used to cancel a DFC queued on another CPU: Send() targets aCpu,
// Isr() runs there as the handler.
class TCancelIPI : public TGenericIPI
	{
public:
	void Send(TDfc* aDfc, TInt aCpu);
	static void Isr(TGenericIPI*);
public:
	TDfc* volatile iDfc;				// DFC this IPI is cancelling
	};
   1.778 +
   1.779 +
   1.780 +/**
   1.781 +@internalComponent
   1.782 +*/
   1.783 +TBool InterruptsStatus(TBool aRequest);
   1.784 +
   1.785 +
   1.786 +//declarations for the checking of kernel preconditions
   1.787 +
   1.788 +/**
   1.789 +@internalComponent
   1.790 +
   1.791 +PRECOND_FUNCTION_CALLER is needed for __ASSERT_WITH_MESSAGE_ALWAYS(),
   1.792 +so is outside the #ifdef _DEBUG.
   1.793 +*/
   1.794 +#ifndef PRECOND_FUNCTION_CALLER
   1.795 +#define PRECOND_FUNCTION_CALLER		0
   1.796 +#endif
   1.797 +
   1.798 +#ifdef _DEBUG
   1.799 +
   1.800 +/**
   1.801 +@internalComponent
   1.802 +*/
   1.803 +#define MASK_NO_FAST_MUTEX 0x1
   1.804 +#define MASK_CRITICAL 0x2
   1.805 +#define MASK_NO_CRITICAL 0x4
   1.806 +#define MASK_KERNEL_LOCKED 0x8
   1.807 +#define MASK_KERNEL_UNLOCKED 0x10
   1.808 +#define MASK_KERNEL_LOCKED_ONCE 0x20
   1.809 +#define MASK_INTERRUPTS_ENABLED 0x40
   1.810 +#define MASK_INTERRUPTS_DISABLED 0x80
   1.811 +#define MASK_SYSTEM_LOCKED 0x100
   1.812 +#define MASK_NOT_ISR 0x400
   1.813 +#define MASK_NOT_IDFC 0x800 
   1.814 +#define MASK_NOT_THREAD 0x1000
   1.815 +#define MASK_NO_CRITICAL_IF_USER 0x2000
   1.816 +#define MASK_THREAD_STANDARD ( MASK_NO_FAST_MUTEX | MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC )
   1.817 +#define MASK_THREAD_CRITICAL ( MASK_THREAD_STANDARD | MASK_CRITICAL )
   1.818 +#define MASK_ALWAYS_FAIL 0x4000
   1.819 +#define	MASK_NO_RESCHED 0x8000
   1.820 +
   1.821 +#if defined(__STANDALONE_NANOKERNEL__) || (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
   1.822 +#define CHECK_PRECONDITIONS(mask,function)
   1.823 +#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function)
   1.824 +
   1.825 +#else
   1.826 +/**
   1.827 +@internalComponent
   1.828 +*/
   1.829 +extern "C" TInt CheckPreconditions(TUint32 aConditionMask, const char* aFunction, TLinAddr aAddr);
   1.830 +/**
   1.831 +@internalComponent
   1.832 +*/
   1.833 +#define CHECK_PRECONDITIONS(mask,function) CheckPreconditions(mask,function,PRECOND_FUNCTION_CALLER)
   1.834 +
   1.835 +#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
   1.836 +
   1.837 +/**
   1.838 +@internalComponent
   1.839 +*/
   1.840 +#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
   1.841 +			__ASSERT_DEBUG( (cond), ( \
   1.842 +			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\
   1.843 +			NKFault(function, 0)))
   1.844 +
   1.845 +#else//!__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
   1.846 +/**
   1.847 +@internalComponent
   1.848 +*/
   1.849 +#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
   1.850 +			__ASSERT_DEBUG( (cond), \
   1.851 +			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER))
   1.852 +
   1.853 +
   1.854 +#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
   1.855 +#endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
   1.856 +
   1.857 +#else//if !DEBUG
   1.858 +
   1.859 +#define CHECK_PRECONDITIONS(mask,function)
   1.860 +#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function )
   1.861 +
   1.862 +#endif//_DEBUG
   1.863 +
   1.864 +#if (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
   1.865 +#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function )
   1.866 +#else
   1.867 +#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
   1.868 +/**
   1.869 +@internalComponent
   1.870 +*/
   1.871 +#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
   1.872 +			__ASSERT_ALWAYS( (cond), ( \
   1.873 +			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\
   1.874 +			NKFault(function, 0)))
   1.875 +#else
   1.876 +/**
   1.877 +@internalComponent
   1.878 +*/
   1.879 +#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
   1.880 +			__ASSERT_ALWAYS( (cond), \
   1.881 +			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER))
   1.882 +#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
   1.883 +#endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
   1.884 +
   1.885 +#endif