os/kernelhwsrv/kernel/eka/include/nkernsmp/nk_priv.h
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\include\nkernsmp\nk_priv.h
// 
// WARNING: This file contains some APIs which are internal and are subject
//          to change without notice. Such APIs should therefore not be used
//          outside the Kernel and Hardware Services package.
//
#ifndef __NK_PRIV_H__
#define __NK_PRIV_H__
#include <cpudefs.h>
#include <nkern.h>

// When defined, BTrace output is serialised with a global spin lock
// (see __ACQUIRE_BTRACE_LOCK / __RELEASE_BTRACE_LOCK below).
#define __USE_BTRACE_LOCK__

class Monitor;
/********************************************
sl@0
    31
 * Schedulable = thread or thread group
sl@0
    32
 ********************************************/
sl@0
    33
sl@0
    34
/**
sl@0
    35
@publishedPartner
sl@0
    36
@prototype
sl@0
    37
sl@0
    38
Base class for a nanokernel thread or group
sl@0
    39
*/
sl@0
    40
class NThreadGroup;
sl@0
    41
class NSchedulable : public TPriListLink
sl@0
    42
	{
sl@0
    43
public:
sl@0
    44
	enum
sl@0
    45
		{
sl@0
    46
		EReadyGroup=1,
sl@0
    47
		EReadyCpuMask=0x7f,
sl@0
    48
		EReadyOffset=0x80,
sl@0
    49
		};
sl@0
    50
sl@0
    51
	enum NReadyFlags
sl@0
    52
		{
sl@0
    53
		ENewTimeslice=1,
sl@0
    54
		EPreferSameCpu=2,
sl@0
    55
		EUnPause=4,
sl@0
    56
		};
sl@0
    57
sl@0
    58
	enum NEventState
sl@0
    59
		{
sl@0
    60
		EEventCountShift=16u,
sl@0
    61
		EEventCountMask=0xffff0000u,
sl@0
    62
		EEventCountInc=0x10000u,
sl@0
    63
		EEventCpuShift=0u,
sl@0
    64
		EEventCpuMask=0x1fu,
sl@0
    65
		EThreadCpuShift=8u,
sl@0
    66
		EThreadCpuMask=0x1f00u,
sl@0
    67
		EDeferredReady=0x4000u,
sl@0
    68
		EEventParent=0x8000u,
sl@0
    69
		};
sl@0
    70
public:
sl@0
    71
	NSchedulable();
sl@0
    72
	void AcqSLock();
sl@0
    73
	void RelSLock();
sl@0
    74
	void LAcqSLock();
sl@0
    75
	void RelSLockU();
sl@0
    76
	void ReadyT(TUint aMode);					// make ready, assumes lock held
sl@0
    77
	TInt BeginTiedEvent();
sl@0
    78
	void EndTiedEvent();
sl@0
    79
	TInt AddTiedEvent(NEventHandler* aEvent);
sl@0
    80
	TBool TiedEventReadyInterlock(TInt aCpu);
sl@0
    81
	void UnPauseT();							// decrement pause count and make ready if necessary
sl@0
    82
	static void DeferredReadyIDfcFn(TAny*);
sl@0
    83
	void DetachTiedEvents();
sl@0
    84
public:
sl@0
    85
	inline TBool IsGroup()			{return !iParent;}
sl@0
    86
	inline TBool IsLoneThread()		{return iParent==this;}
sl@0
    87
	inline TBool IsGroupThread()	{return iParent && iParent!=this;}
sl@0
    88
public:
sl@0
    89
//	TUint8				iReady;					/**< @internalComponent */	// flag indicating thread on ready list = cpu number | EReadyOffset
sl@0
    90
//	TUint8				iCurrent;				/**< @internalComponent */	// flag indicating thread is running
sl@0
    91
//	TUint8				iLastCpu;				/**< @internalComponent */	// CPU on which this thread last ran
sl@0
    92
	TUint8				iPauseCount;			/**< @internalComponent */	// count of externally requested pauses extending a voluntary wait
sl@0
    93
	TUint8				iSuspended;				/**< @internalComponent */	// flag indicating active external suspend (Not used for groups)
sl@0
    94
	TUint8				iNSchedulableSpare1;	/**< @internalComponent */
sl@0
    95
	TUint8				iNSchedulableSpare2;	/**< @internalComponent */
sl@0
    96
sl@0
    97
	TUint8				iCpuChange;				/**< @internalComponent */	// flag showing CPU migration outstanding
sl@0
    98
	TUint8				iStopping;				/**< @internalComponent */	// thread is exiting, thread group is being destroyed
sl@0
    99
	TUint16				iFreezeCpu;				/**< @internalComponent */	// flag set if CPU frozen - count for groups
sl@0
   100
	NSchedulable*		iParent;				/**< @internalComponent */	// Pointer to group containing thread, =this for normal thread, =0 for group
sl@0
   101
sl@0
   102
	TUint32				iCpuAffinity;			/**< @internalComponent */
sl@0
   103
	volatile TUint32	iEventState;			/**< @internalComponent */	// bits 16-31=count, 0-4=event CPU, 5-9=thread CPU, 10=defer, 11=parent
sl@0
   104
sl@0
   105
	TSpinLock			iSSpinLock;				/**< @internalComponent */
sl@0
   106
sl@0
   107
	SDblQue				iEvents;				/**< @internalComponent */	// doubly-linked list of tied events
sl@0
   108
sl@0
   109
	TUint32				i_IDfcMem[sizeof(TDfc)/sizeof(TUint32)];	/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes
sl@0
   110
//	TDfc				iDeferredReadyIDfc;		/**< @internalComponent */	// IDFC used to make thread ready after last tied event completes
sl@0
   111
sl@0
   112
	union
sl@0
   113
		{
sl@0
   114
		TUint64			iRunCount64;
sl@0
   115
		TUint32			iRunCount32[2];
sl@0
   116
		};
sl@0
   117
	union
sl@0
   118
		{
sl@0
   119
		TUint64			iTotalCpuTime64;		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
sl@0
   120
		TUint32			iTotalCpuTime32[2];		/**< @internalComponent */	// total time spent running, in hi-res timer ticks
sl@0
   121
		};
sl@0
   122
	};
sl@0
   123
sl@0
   124
__ASSERT_COMPILE(!(_FOFF(NSchedulable,iSSpinLock)&7));
sl@0
   125
__ASSERT_COMPILE(!(_FOFF(NSchedulable,iRunCount64)&7));
sl@0
   126
__ASSERT_COMPILE(!(_FOFF(NSchedulable,iTotalCpuTime64)&7));
sl@0
   127
__ASSERT_COMPILE(!(sizeof(NSchedulable)&7));
sl@0
   128
sl@0
   129
sl@0
   130
/**
sl@0
   131
@internalComponent
sl@0
   132
*/
sl@0
   133
inline TBool TDfc::IsValid()
sl@0
   134
	{
sl@0
   135
	if (iHType < KNumDfcPriorities)
sl@0
   136
		return TRUE;
sl@0
   137
	if (iHType != EEventHandlerIDFC)
sl@0
   138
		return FALSE;
sl@0
   139
	return !iTied || !iTied->iStopping;
sl@0
   140
	}
sl@0
   141
sl@0
   142
/********************************************
sl@0
   143
 * Thread
sl@0
   144
 ********************************************/
sl@0
   145
sl@0
   146
/**
sl@0
   147
@internalComponent
sl@0
   148
*/
sl@0
   149
class NThreadWaitState
sl@0
   150
	{
sl@0
   151
private:
sl@0
   152
	enum TWtStFlags
sl@0
   153
		{
sl@0
   154
		EWtStWaitPending		=0x01u,		// thread is about to wait
sl@0
   155
		EWtStWaitActive			=0x02u,		// thread is actually blocked
sl@0
   156
		EWtStTimeout			=0x04u,		// timeout is active on this wait
sl@0
   157
		EWtStObstructed			=0x08u,		// wait is due to obstruction (e.g. mutex) rather than lack of work to do
sl@0
   158
		EWtStDead				=0x80u,		// thread is dead
sl@0
   159
		};
sl@0
   160
private:
sl@0
   161
	NThreadWaitState();
sl@0
   162
	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj);
sl@0
   163
	void SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout);
sl@0
   164
	void SetDead(TDfc* aKillDfc);
sl@0
   165
	void CancelWait();
sl@0
   166
	TInt DoWait();
sl@0
   167
	static void TimerExpired(TAny*);
sl@0
   168
	TInt UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue);
sl@0
   169
	TUint32 ReleaseT(TAny*& aWaitObj, TInt aReturnValue);
sl@0
   170
	void CancelTimerT();
sl@0
   171
private:
sl@0
   172
	inline NThreadBase* Thread();
sl@0
   173
	inline TBool WaitPending()
sl@0
   174
		{ return iWtC.iWtStFlags & (EWtStWaitPending|EWtStDead); }
sl@0
   175
	inline TBool ThreadIsBlocked()
sl@0
   176
		{ return iWtC.iWtStFlags & (EWtStWaitActive|EWtStDead); }
sl@0
   177
	inline TBool ThreadIsDead()
sl@0
   178
		{ return iWtC.iWtStFlags & EWtStDead; }
sl@0
   179
private:
sl@0
   180
	struct S
sl@0
   181
		{
sl@0
   182
		volatile TUint8			iWtStFlags;
sl@0
   183
		volatile TUint8			iWtObjType;
sl@0
   184
		volatile TUint8			iWtStSpare1;
sl@0
   185
		volatile TUint8			iWtStSpare2;
sl@0
   186
		union
sl@0
   187
			{
sl@0
   188
			TAny* volatile		iWtObj;
sl@0
   189
			volatile TInt		iRetVal;
sl@0
   190
			TDfc* volatile		iKillDfc;
sl@0
   191
			};
sl@0
   192
		};
sl@0
   193
	union
sl@0
   194
		{
sl@0
   195
		S						iWtC;
sl@0
   196
		volatile TUint32		iWtSt32[2];
sl@0
   197
		volatile TUint64		iWtSt64;
sl@0
   198
		};
sl@0
   199
	NTimer						iTimer;
sl@0
   200
private:
sl@0
   201
	friend class NSchedulable;
sl@0
   202
	friend class NThreadBase;
sl@0
   203
	friend class NThread;
sl@0
   204
	friend class TScheduler;
sl@0
   205
	friend class TSubScheduler;
sl@0
   206
	friend class TDfc;
sl@0
   207
	friend class TDfcQue;
sl@0
   208
	friend class NFastSemaphore;
sl@0
   209
	friend class NFastMutex;
sl@0
   210
	friend class NTimer;
sl@0
   211
	friend class NTimerQ;
sl@0
   212
	friend class NKern;
sl@0
   213
	friend class Monitor;
sl@0
   214
	friend class NKTest;
sl@0
   215
	};
sl@0
   216
sl@0
   217
/**
sl@0
   218
@publishedPartner
sl@0
   219
@prototype
sl@0
   220
sl@0
   221
Base class for a nanokernel thread.
sl@0
   222
*/
sl@0
   223
class TSubScheduler;
sl@0
   224
class NThreadBase : public NSchedulable
sl@0
   225
	{
sl@0
   226
public:
sl@0
   227
    /**
sl@0
   228
    Defines the possible types of wait object
sl@0
   229
    */
sl@0
   230
	enum NThreadWaitType
sl@0
   231
		{
sl@0
   232
		EWaitNone,
sl@0
   233
		EWaitFastSemaphore,
sl@0
   234
		EWaitFastMutex,
sl@0
   235
		EWaitSleep,
sl@0
   236
		EWaitBlocked,
sl@0
   237
		EWaitDfc,
sl@0
   238
		
sl@0
   239
		ENumWaitTypes
sl@0
   240
		};
sl@0
   241
sl@0
   242
		
sl@0
   243
	/**
sl@0
   244
	@internalComponent
sl@0
   245
	*/
sl@0
   246
	enum NThreadCSFunction
sl@0
   247
		{
sl@0
   248
		ECSExitPending=-1,
sl@0
   249
		ECSExitInProgress=-2,
sl@0
   250
		ECSDivertPending=-3,
sl@0
   251
		};
sl@0
   252
sl@0
   253
	/**
sl@0
   254
	@internalComponent
sl@0
   255
	*/
sl@0
   256
	enum NThreadTimeoutOp
sl@0
   257
		{
sl@0
   258
		ETimeoutPreamble=0,
sl@0
   259
		ETimeoutPostamble=1,
sl@0
   260
		ETimeoutSpurious=2,
sl@0
   261
		};
sl@0
   262
public:
sl@0
   263
	NThreadBase();
sl@0
   264
	TInt Create(SNThreadCreateInfo& anInfo,	TBool aInitial);
sl@0
   265
	void UnReadyT();
sl@0
   266
	TBool SuspendOrKill(TInt aCount);
sl@0
   267
	TBool DoSuspendOrKillT(TInt aCount, TSubScheduler* aS);
sl@0
   268
	TBool CancelTimerT();
sl@0
   269
	void DoReleaseT(TInt aReturnCode, TUint aMode);
sl@0
   270
	TBool CheckFastMutexDefer();
sl@0
   271
	void DoCsFunctionT();
sl@0
   272
	TBool Resume(TBool aForce);
sl@0
   273
	IMPORT_C TBool Suspend(TInt aCount);		/**< @internalComponent */
sl@0
   274
	IMPORT_C TBool Resume();					/**< @internalComponent */
sl@0
   275
	IMPORT_C TBool ForceResume();				/**< @internalComponent */
sl@0
   276
	IMPORT_C void Release(TInt aReturnCode, TUint aMode);	/**< @internalComponent */
sl@0
   277
	IMPORT_C void RequestSignal();				/**< @internalComponent */
sl@0
   278
	IMPORT_C void SetPriority(TInt aPriority);	/**< @internalComponent */
sl@0
   279
	void SetMutexPriority(NFastMutex* aMutex);
sl@0
   280
	void LoseInheritedPriorityT();
sl@0
   281
	void ChangeReadyThreadPriority();
sl@0
   282
	TUint32 SetCpuAffinity(TUint32 aAffinity);
sl@0
   283
	TBool TiedEventLeaveInterlock();
sl@0
   284
	TBool TiedEventJoinInterlock();
sl@0
   285
	IMPORT_C void Kill();						/**< @internalComponent */
sl@0
   286
	void Exit();
sl@0
   287
	// hooks for platform-specific code
sl@0
   288
	void OnKill(); 
sl@0
   289
	void OnExit();
sl@0
   290
public:
sl@0
   291
	static void TimerExpired(TAny* aPtr);
sl@0
   292
sl@0
   293
	/** @internalComponent */
sl@0
   294
	inline void UnknownState(TInt aOp, TInt aParam)
sl@0
   295
		{ (*iHandlers->iStateHandler)((NThread*)this,aOp,aParam); }
sl@0
   296
sl@0
   297
	/** @internalComponent */
sl@0
   298
	inline TUint8 Attributes()
sl@0
   299
		{ return i_ThrdAttr; }
sl@0
   300
sl@0
   301
	/** @internalComponent */
sl@0
   302
	inline TUint8 SetAttributes(TUint8 aNewAtt)
sl@0
   303
		{ return __e32_atomic_swp_ord8(&i_ThrdAttr, aNewAtt); }
sl@0
   304
sl@0
   305
	/** @internalComponent */
sl@0
   306
	inline TUint8 ModifyAttributes(TUint8 aClearMask, TUint8 aSetMask)
sl@0
   307
		{ return __e32_atomic_axo_ord8(&i_ThrdAttr, (TUint8)~(aClearMask|aSetMask), aSetMask); }
sl@0
   308
sl@0
   309
	/** @internalComponent */
sl@0
   310
	inline void SetAddressSpace(TAny* a)
sl@0
   311
		{ iAddressSpace=a; }
sl@0
   312
sl@0
   313
	/** @internalComponent */
sl@0
   314
	inline void SetExtraContext(TAny* a, TInt aSize)
sl@0
   315
		{ iExtraContext = a; iExtraContextSize = aSize; }
sl@0
   316
sl@0
   317
	/** @internalTechnology */
sl@0
   318
	inline TBool IsDead()
sl@0
   319
		{ return iWaitState.ThreadIsDead(); }
sl@0
   320
public:
sl@0
   321
	TPriListLink		iWaitLink;				/**< @internalComponent */	// used to link thread into a wait queue
sl@0
   322
//	TUint8				iBasePri;				/**< @internalComponent */	// priority with no fast mutex held
sl@0
   323
//	TUint8				iMutexPri;				/**< @internalComponent */	// priority from held fast mutex
sl@0
   324
//	TUint8				iInitial;				/**< @internalComponent */	// TRUE if this is an initial thread
sl@0
   325
	TUint8				iLinkedObjType;
sl@0
   326
	TUint8				i_ThrdAttr;				/**< @internalComponent */
sl@0
   327
	TUint8				iNThreadBaseSpare10;
sl@0
   328
	TUint8				iFastMutexDefer;		/**< @internalComponent */
sl@0
   329
sl@0
   330
	NFastSemaphore		iRequestSemaphore;		/**< @internalComponent */
sl@0
   331
sl@0
   332
	TInt				iTime;					/**< @internalComponent */	// time remaining, 0 if expired
sl@0
   333
	TInt				iTimeslice;				/**< @internalComponent */	// timeslice for this thread, -ve = no timeslicing
sl@0
   334
sl@0
   335
	TLinAddr			iSavedSP;				/**< @internalComponent */
sl@0
   336
	TAny*				iAddressSpace;			/**< @internalComponent */
sl@0
   337
sl@0
   338
	NFastMutex* volatile iHeldFastMutex;		/**< @internalComponent */	// fast mutex held by this thread
sl@0
   339
	TUserModeCallback* volatile iUserModeCallbacks;	/**< @internalComponent */	// Head of singly-linked list of callbacks
sl@0
   340
	TAny* volatile		iLinkedObj;				/**< @internalComponent */	// object to which this thread is linked
sl@0
   341
	NThreadGroup*		iNewParent;				/**< @internalComponent */	// group to join
sl@0
   342
sl@0
   343
	const SFastExecTable* iFastExecTable;		/**< @internalComponent */
sl@0
   344
	const SSlowExecEntry* iSlowExecTable;		/**< @internalComponent */	// points to first entry iEntries[0]
sl@0
   345
sl@0
   346
	volatile TInt		iCsCount;				/**< @internalComponent */	// critical section count
sl@0
   347
	volatile TInt		iCsFunction;			/**< @internalComponent */	// what to do on leaving CS: +n=suspend n times, 0=nothing, -1=exit
sl@0
   348
sl@0
   349
	NThreadWaitState	iWaitState;				/**< @internalComponent */
sl@0
   350
sl@0
   351
	const SNThreadHandlers* iHandlers;			/**< @internalComponent */	// additional thread event handlers
sl@0
   352
	TInt				iSuspendCount;			/**< @internalComponent */	// -how many times we have been suspended
sl@0
   353
sl@0
   354
	TLinAddr			iStackBase;				/**< @internalComponent */
sl@0
   355
	TInt				iStackSize;				/**< @internalComponent */
sl@0
   356
sl@0
   357
	TAny*				iExtraContext;			/**< @internalComponent */	// parent FPSCR value (iExtraContextSize == -1), coprocessor context (iExtraContextSize > 0) or NULL
sl@0
   358
	TInt				iExtraContextSize;		/**< @internalComponent */	// +ve=dynamically allocated, 0=none, -1=iExtraContext stores parent FPSCR value
sl@0
   359
sl@0
   360
	TUint32				iNThreadBaseSpare6;		/**< @internalComponent */	// spare to allow growth while preserving BC
sl@0
   361
	TUint32				iNThreadBaseSpare7;		/**< @internalComponent */	// spare to allow growth while preserving BC
sl@0
   362
	TUint32				iNThreadBaseSpare8;		/**< @internalComponent */	// spare to allow growth while preserving BC
sl@0
   363
	TUint32				iNThreadBaseSpare9;		/**< @internalComponent */	// spare to allow growth while preserving BC
sl@0
   364
sl@0
   365
	// For EMI support - HOPEFULLY THIS CAN DIE
sl@0
   366
	TUint32	iTag;							/**< @internalComponent */	// User defined set of bits which is ANDed with a mask when the thread is scheduled, and indicates if a DFC should be scheduled.
sl@0
   367
	TAny* iVemsData;						/**< @internalComponent */	// This pointer can be used by any VEMS to store any data associated with the thread.  This data must be clean up before the Thread Exit Monitor completes.
sl@0
   368
	};
sl@0
   369
sl@0
   370
__ASSERT_COMPILE(!(_FOFF(NThreadBase,iWaitLink)&7));
sl@0
   371
__ASSERT_COMPILE(!(sizeof(NThreadBase)&7));
sl@0
   372
sl@0
   373
#ifdef __INCLUDE_NTHREADBASE_DEFINES__
sl@0
   374
#define	iReady				iSpare1				/**< @internalComponent */
sl@0
   375
#define	iCurrent			iSpare2				/**< @internalComponent */
sl@0
   376
#define	iLastCpu			iSpare3				/**< @internalComponent */
sl@0
   377
sl@0
   378
#define iBasePri			iWaitLink.iSpare1	/**< @internalComponent */
sl@0
   379
#define	iMutexPri			iWaitLink.iSpare2	/**< @internalComponent */
sl@0
   380
#define	i_NThread_Initial	iWaitLink.iSpare3	/**< @internalComponent */
sl@0
   381
sl@0
   382
#endif
sl@0
   383
sl@0
   384
/** @internalComponent */
sl@0
   385
#define	i_NThread_BasePri	iWaitLink.iSpare1
sl@0
   386
sl@0
   387
/** @internalComponent */
sl@0
   388
#define	NTHREADBASE_CPU_AFFINITY_MASK	0x80000000
sl@0
   389
sl@0
   390
/** @internalComponent */
sl@0
   391
inline NThreadBase* NThreadWaitState::Thread()
sl@0
   392
	{ return _LOFF(this, NThreadBase, iWaitState); }
sl@0
   393
sl@0
   394
/********************************************
sl@0
   395
 * Thread group
sl@0
   396
 ********************************************/
sl@0
   397
sl@0
   398
/**
sl@0
   399
@publishedPartner
sl@0
   400
@prototype
sl@0
   401
sl@0
   402
Base class for a nanokernel thread or group
sl@0
   403
*/
sl@0
   404
class NThreadGroup : public NSchedulable
sl@0
   405
	{
sl@0
   406
public:
sl@0
   407
	NThreadGroup();
sl@0
   408
public:
sl@0
   409
	TInt iThreadCount;										/**< @internalComponent */
sl@0
   410
	TPriList<NThreadBase, KNumPriorities> iNThreadList;		/**< @internalComponent */
sl@0
   411
	};
sl@0
   412
sl@0
   413
/********************************************
sl@0
   414
 * Scheduler
sl@0
   415
 ********************************************/
sl@0
   416
sl@0
   417
/**
sl@0
   418
@internalComponent
sl@0
   419
*/
sl@0
   420
class TScheduler;
sl@0
   421
class NThread;
sl@0
   422
class NIrqHandler;
sl@0
   423
class TSubScheduler : public TPriListBase
sl@0
   424
	{
sl@0
   425
public:
sl@0
   426
	TSubScheduler();
sl@0
   427
	void QueueDfcs();
sl@0
   428
	void RotateReadyList(TInt aPriority);
sl@0
   429
	NThread* SelectNextThread();
sl@0
   430
	TBool QueueEvent(NEventHandler* aEvent);
sl@0
   431
	void QueueEventAndKick(NEventHandler* aEvent);
sl@0
   432
	void SaveTimesliceTimer(NThreadBase* aThread);
sl@0
   433
	void UpdateThreadTimes(NThreadBase* aOld, NThreadBase* aNew);
sl@0
   434
private:
sl@0
   435
	SDblQueLink*	iExtraQueues[KNumPriorities-1];
sl@0
   436
public:
sl@0
   437
	TSpinLock		iExIDfcLock;				// lock to protect exogenous IDFC queue
sl@0
   438
sl@0
   439
	SDblQue			iExIDfcs;					// list of pending exogenous IDFCs (i.e. ones punted over from another CPU)
sl@0
   440
sl@0
   441
	SDblQue			iDfcs;						// normal IDFC/DFC pending queue (only accessed by this CPU)
sl@0
   442
sl@0
   443
	TDfc* volatile	iCurrentIDFC;				// pointer to IDFC currently running on this CPU
sl@0
   444
	NThread*		iCurrentThread;				// the thread currently running on this CPU
sl@0
   445
sl@0
   446
	TUint32			iCpuNum;
sl@0
   447
	TUint32			iCpuMask;
sl@0
   448
sl@0
   449
	TSpinLock		iReadyListLock;
sl@0
   450
sl@0
   451
	volatile TUint8	iRescheduleNeededFlag;		// TRUE if a thread reschedule is pending
sl@0
   452
	TUint8			iSubSchedulerSBZ1;			// always zero
sl@0
   453
	volatile TUint8	iDfcPendingFlag;			// TRUE if a normal IDFC is pending
sl@0
   454
	volatile TUint8	iExIDfcPendingFlag;			// TRUE if an exogenous IDFC is pending
sl@0
   455
	TInt			iKernLockCount;				// how many times the current CPU has locked the kernel
sl@0
   456
sl@0
   457
	TUint8			iInIDFC;					// TRUE if IDFCs are currently being run on this CPU
sl@0
   458
	volatile TUint8	iEventHandlersPending;		// TRUE if an event handler is pending on this CPU
sl@0
   459
	TUint8			iSubSchedulerSpare4;
sl@0
   460
	TUint8			iSubSchedulerSpare5;
sl@0
   461
	TAny*			iAddressSpace;
sl@0
   462
sl@0
   463
	TUint32			iReschedIPIs;
sl@0
   464
	TScheduler*		iScheduler;
sl@0
   465
sl@0
   466
	union
sl@0
   467
		{
sl@0
   468
		TUint64		iLastTimestamp64;			// NKern::Timestamp() value at last reschedule or timestamp sync
sl@0
   469
		TUint32		iLastTimestamp32[2];
sl@0
   470
		};
sl@0
   471
	union
sl@0
   472
		{
sl@0
   473
		TUint64		iReschedCount64;
sl@0
   474
		TUint32		iReschedCount32[2];
sl@0
   475
		};
sl@0
   476
sl@0
   477
	TAny*			iExtras[24];				// Space for platform-specific extras
sl@0
   478
sl@0
   479
	TGenericIPI*	iNextIPI;					// next generic IPI to run on this CPU
sl@0
   480
	NThread*		iInitialThread;				// Initial (idle) thread on this CPU
sl@0
   481
sl@0
   482
	TSpinLock		iEventHandlerLock;			// lock to protect event handler queue
sl@0
   483
sl@0
   484
	SDblQue			iEventHandlers;				// queue of pending event handlers on this CPU
sl@0
   485
sl@0
   486
	TUint64			iSpinLockOrderCheck;		// bitmask showing which spinlock orders currently held
sl@0
   487
sl@0
   488
	TUint32			iSubSchedulerPadding[8];
sl@0
   489
	};
sl@0
   490
sl@0
   491
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iExIDfcLock)&7));
sl@0
   492
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iEventHandlerLock)&7));
sl@0
   493
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReadyListLock)&7));
sl@0
   494
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iLastTimestamp64)&7));
sl@0
   495
__ASSERT_COMPILE(!(_FOFF(TSubScheduler,iReschedCount64)&7));
sl@0
   496
__ASSERT_COMPILE(sizeof(TSubScheduler)==512);	// make it a nice power of 2 size for easy indexing
sl@0
   497
sl@0
   498
/**
sl@0
   499
@internalComponent
sl@0
   500
*/
sl@0
   501
class TScheduler
sl@0
   502
	{
sl@0
   503
public:
sl@0
   504
	TScheduler();
sl@0
   505
	static void Reschedule();
sl@0
   506
	IMPORT_C static TScheduler* Ptr();
sl@0
   507
	inline void SetProcessHandler(TLinAddr aHandler) {iProcessHandler=aHandler;}
sl@0
   508
public:
sl@0
   509
	TLinAddr		iMonitorExceptionHandler;
sl@0
   510
	TLinAddr		iProcessHandler;
sl@0
   511
sl@0
   512
	TLinAddr		iRescheduleHook;
sl@0
   513
	TUint32			iActiveCpus1;				// bit n set if CPU n is accepting unlocked threads
sl@0
   514
sl@0
   515
	TUint32			iActiveCpus2;				// bit n set if CPU n is accepting generic IPIs
sl@0
   516
	TInt			iNumCpus;					// number of CPUs under the kernel's control
sl@0
   517
sl@0
   518
	TSubScheduler*	iSub[KMaxCpus];				// one subscheduler per CPU
sl@0
   519
sl@0
   520
	TAny*			iExtras[24];				// Space for platform-specific extras
sl@0
   521
sl@0
   522
	NFastMutex		iLock;						// the 'system lock' fast mutex
sl@0
   523
sl@0
   524
	TSpinLock		iIdleSpinLock;				// lock to protect list of DFCs to be run on idle
sl@0
   525
sl@0
   526
	SDblQue			iIdleDfcs;					// list of DFCs to run when all CPUs go idle
sl@0
   527
sl@0
   528
	TUint32			iCpusNotIdle;				// bitmask - Bit n set => CPU n is not idle
sl@0
   529
	TUint8			iIdleGeneration;			// Toggles between 0 and 1 each time iIdleDfcs list is spilled to a CPU IDFC queue
sl@0
   530
	TUint8			iIdleSpillCpu;				// Which CPU last spilled the iIdleDfcs list to its IDFC queue
sl@0
   531
	TUint8			iTSchedulerSpare1;
sl@0
   532
	TUint8			iTSchedulerSpare2;
sl@0
   533
sl@0
   534
	TUint32			iIdleGenerationCount;		// Incremented each time iIdleDfcs list is spilled to a CPU IDFC queue
sl@0
   535
	TUint32			i_Scheduler_Padding[3];
sl@0
   536
sl@0
   537
	// For EMI support - HOPEFULLY THIS CAN DIE
sl@0
   538
	NThread* iSigma;	
sl@0
   539
	TDfc* iEmiDfc;
sl@0
   540
	TUint32 iEmiMask;
sl@0
   541
	TUint32 iEmiState;
sl@0
   542
	TUint32 iEmiDfcTrigger;
sl@0
   543
	TBool iLogging;
sl@0
   544
	TAny* iBufferStart;
sl@0
   545
	TAny* iBufferEnd;
sl@0
   546
	TAny* iBufferTail;
sl@0
   547
	TAny* iBufferHead;
sl@0
   548
	};
sl@0
   549
sl@0
   550
__ASSERT_COMPILE(!(_FOFF(TScheduler,iIdleSpinLock)&7));
sl@0
   551
__ASSERT_COMPILE(sizeof(TScheduler)==512);
sl@0
   552
sl@0
   553
extern TScheduler TheScheduler;
extern TSubScheduler TheSubSchedulers[KMaxCpus];

#ifdef __USE_BTRACE_LOCK__
extern TSpinLock BTraceLock;

#define	__ACQUIRE_BTRACE_LOCK()			TInt _btrace_irq = BTraceLock.LockIrqSave()
#define	__RELEASE_BTRACE_LOCK()			BTraceLock.UnlockIrqRestore(_btrace_irq)

#else

#define	__ACQUIRE_BTRACE_LOCK()
#define	__RELEASE_BTRACE_LOCK()

#endif

/**
@internalComponent
*/
extern "C" TSubScheduler& SubScheduler();

/**
@internalComponent
*/
extern "C" void send_resched_ipis(TUint32 aMask);

/**
@internalComponent
*/
extern "C" void send_resched_ipi(TInt aCpu);

/**
@internalComponent
*/
extern "C" void send_resched_ipi_and_wait(TInt aCpu);


#include <nk_plat.h>

/**
Call with kernel locked

@internalComponent
*/
inline void RescheduleNeeded()
	{ SubScheduler().iRescheduleNeededFlag = 1; }


/**
@internalComponent
*/
#define	NCurrentThread()	NKern::CurrentThread()

/** Optimised current thread function which can only be called from places where
	CPU migration is not possible - i.e. with interrupts disabled or preemption
	disabled.

@internalComponent
*/
extern "C" NThread* NCurrentThreadL();
/** @internalComponent */
sl@0
   615
inline TBool CheckCpuAgainstAffinity(TInt aCpu, TUint32 aAffinity)
sl@0
   616
	{
sl@0
   617
	if (aAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
sl@0
   618
		return aAffinity & (1<<aCpu);
sl@0
   619
	return aAffinity==(TUint32)aCpu;
sl@0
   620
	}
sl@0
   621
sl@0
   622
/**
@internalComponent
*/
#define __NK_ASSERT_UNLOCKED	__NK_ASSERT_DEBUG(!NKern::KernelLocked())

/**
@internalComponent
*/
#define __NK_ASSERT_LOCKED		__NK_ASSERT_DEBUG(NKern::KernelLocked())

#ifdef _DEBUG
/**
@publishedPartner
@released
*/
#define __ASSERT_NO_FAST_MUTEX	__NK_ASSERT_DEBUG(!NKern::HeldFastMutex());

/**
@publishedPartner
@released
*/
#define __ASSERT_FAST_MUTEX(m)	__NK_ASSERT_DEBUG((m)->HeldByCurrentThread());

/**
@publishedPartner
@released
*/
#define __ASSERT_SYSTEM_LOCK	__NK_ASSERT_DEBUG(TScheduler::Ptr()->iLock.HeldByCurrentThread());

#define __ASSERT_NOT_ISR		__NK_ASSERT_DEBUG(NKern::CurrentContext()!=NKern::EInterrupt)

#else
// Release builds: the assertions compile away to nothing.
#define __ASSERT_NO_FAST_MUTEX
#define __ASSERT_FAST_MUTEX(m)
#define	__ASSERT_SYSTEM_LOCK
#define __ASSERT_NOT_ISR
#endif
/********************************************
sl@0
   661
 * System timer queue
sl@0
   662
 ********************************************/
sl@0
   663
sl@0
   664
/**
sl@0
   665
@publishedPartner
sl@0
   666
@prototype
sl@0
   667
*/
sl@0
   668
class NTimerQ
sl@0
   669
	{
sl@0
   670
	friend class NTimer;
sl@0
   671
public:
sl@0
   672
	typedef void (*TDebugFn)(TAny* aPtr, TInt aPos);	/**< @internalComponent */
sl@0
   673
	enum { ETimerQMask=31, ENumTimerQueues=32 };		/**< @internalComponent */	// these are not easily modifiable
sl@0
   674
sl@0
   675
	/** @internalComponent */
sl@0
   676
	struct STimerQ
sl@0
   677
		{
sl@0
   678
		SDblQue iIntQ;
sl@0
   679
		SDblQue iDfcQ;
sl@0
   680
		};
sl@0
   681
public:
sl@0
   682
	NTimerQ();
sl@0
   683
	static void Init1(TInt aTickPeriod);
sl@0
   684
	static void Init3(TDfcQue* aDfcQ);
sl@0
   685
	IMPORT_C static TAny* TimerAddress();
sl@0
   686
	IMPORT_C void Tick();
sl@0
   687
	IMPORT_C static TInt IdleTime();
sl@0
   688
	IMPORT_C static void Advance(TInt aTicks);
sl@0
   689
private:
sl@0
   690
	static void DfcFn(TAny* aPtr);
sl@0
   691
	void Dfc();
sl@0
   692
	void Add(NTimer* aTimer);
sl@0
   693
	void AddFinal(NTimer* aTimer);
sl@0
   694
public:
sl@0
   695
	STimerQ			iTickQ[ENumTimerQueues];	/**< @internalComponent */	// NOTE: the order of member data is important
sl@0
   696
	TUint32			iPresent;					/**< @internalComponent */	// The assembler code relies on it
sl@0
   697
	TUint32			iMsCount;					/**< @internalComponent */
sl@0
   698
	SDblQue			iHoldingQ;					/**< @internalComponent */
sl@0
   699
	SDblQue			iOrderedQ;					/**< @internalComponent */
sl@0
   700
	SDblQue			iCompletedQ;				/**< @internalComponent */
sl@0
   701
	TDfc			iDfc;						/**< @internalComponent */
sl@0
   702
	TUint8			iTransferringCancelled;		/**< @internalComponent */
sl@0
   703
	TUint8			iCriticalCancelled;			/**< @internalComponent */
sl@0
   704
	TUint8			iPad1;						/**< @internalComponent */
sl@0
   705
	TUint8			iPad2;						/**< @internalComponent */
sl@0
   706
	TDebugFn		iDebugFn;					/**< @internalComponent */
sl@0
   707
	TAny*			iDebugPtr;					/**< @internalComponent */
sl@0
   708
	TInt			iTickPeriod;				/**< @internalComponent */	// in microseconds
sl@0
   709
sl@0
   710
	/**
sl@0
   711
	This member is intended for use by ASSP/variant interrupt code as a convenient
sl@0
   712
	location to store rounding error information where hardware interrupts are not
sl@0
   713
	exactly one millisecond. The Symbian kernel does not make any use of this member.
sl@0
   714
	@publishedPartner
sl@0
   715
	@prototype
sl@0
   716
	*/
sl@0
   717
	TInt			iRounding;
sl@0
   718
	TInt			iDfcCompleteCount;			/**< @internalComponent */
sl@0
   719
	TSpinLock		iTimerSpinLock;				/**< @internalComponent */
sl@0
   720
	};
sl@0
   721
sl@0
   722
__ASSERT_COMPILE(!(_FOFF(NTimerQ,iTimerSpinLock)&7));
sl@0
   723
sl@0
   724
sl@0
   725
GLREF_D NTimerQ TheTimerQ;
sl@0
   726
sl@0
   727
/**
sl@0
   728
@internalComponent
sl@0
   729
*/
sl@0
   730
inline TUint32 NTickCount()
sl@0
   731
	{return TheTimerQ.iMsCount;}
sl@0
   732
sl@0
   733
/**
sl@0
   734
@internalComponent
sl@0
   735
*/
sl@0
   736
inline TInt NTickPeriod()
sl@0
   737
	{return TheTimerQ.iTickPeriod;}
sl@0
   738
sl@0
   739
sl@0
   740
extern "C" {
sl@0
   741
/**
sl@0
   742
@internalComponent
sl@0
   743
*/
sl@0
   744
extern void NKCrashHandler(TInt aPhase, const TAny* a0, TInt a1);
sl@0
   745
sl@0
   746
/**
sl@0
   747
@internalComponent
sl@0
   748
*/
sl@0
   749
extern TUint32 CrashState;
sl@0
   750
}
sl@0
   751
sl@0
   752
sl@0
   753
/**
sl@0
   754
@internalComponent
sl@0
   755
*/
sl@0
   756
class TGenIPIList : public SDblQue
sl@0
   757
	{
sl@0
   758
public:
sl@0
   759
	TGenIPIList();
sl@0
   760
public:
sl@0
   761
	TSpinLock			iGenIPILock;
sl@0
   762
	};
sl@0
   763
sl@0
   764
/**
Generic IPI used in cancelling a DFC that may be queued on another CPU.
NOTE(review): behaviour inferred from the names — Send() presumably raises
the IPI on CPU aCpu and Isr() runs on the target; confirm against the
implementation.

@internalComponent
*/
class TCancelIPI : public TGenericIPI
	{
public:
	void Send(TDfc* aDfc, TInt aCpu);
	static void Isr(TGenericIPI*);
public:
	TDfc* volatile iDfc;		// volatile: accessed from more than one CPU — presumed; verify
	};
sl@0
   775
sl@0
   776
sl@0
   777
/**
@internalComponent

Declaration only; defined elsewhere. NOTE(review): semantics inferred from
the name — presumably reports whether the interrupt-enable state matches
aRequest; confirm against the definition before relying on this description.
*/
TBool InterruptsStatus(TBool aRequest);
sl@0
   781
sl@0
   782
sl@0
   783
//declarations for the checking of kernel preconditions

/**
@internalComponent

PRECOND_FUNCTION_CALLER is needed for __ASSERT_WITH_MESSAGE_ALWAYS(),
so is outside the #ifdef _DEBUG.
*/
#ifndef PRECOND_FUNCTION_CALLER
// Fallback when no architecture-specific definition has been provided:
// diagnostics report 0 as the calling address.
#define PRECOND_FUNCTION_CALLER		0
#endif
sl@0
   794
sl@0
   795
#ifdef _DEBUG

/**
@internalComponent
*/
// Bit masks describing the calling-context conditions a kernel API requires;
// combined and passed to CHECK_PRECONDITIONS()/CheckPreconditions().
// The mask names describe the condition tested.
#define MASK_NO_FAST_MUTEX 0x1
#define MASK_CRITICAL 0x2
#define MASK_NO_CRITICAL 0x4
#define MASK_KERNEL_LOCKED 0x8
#define MASK_KERNEL_UNLOCKED 0x10
#define MASK_KERNEL_LOCKED_ONCE 0x20
#define MASK_INTERRUPTS_ENABLED 0x40
#define MASK_INTERRUPTS_DISABLED 0x80
#define MASK_SYSTEM_LOCKED 0x100
#define MASK_NOT_ISR 0x400
#define MASK_NOT_IDFC 0x800 
#define MASK_NOT_THREAD 0x1000
#define MASK_NO_CRITICAL_IF_USER 0x2000
// Common combination for ordinary thread-context APIs.
#define MASK_THREAD_STANDARD ( MASK_NO_FAST_MUTEX | MASK_KERNEL_UNLOCKED | MASK_INTERRUPTS_ENABLED | MASK_NOT_ISR | MASK_NOT_IDFC )
// As MASK_THREAD_STANDARD, but the caller must also be in a critical section.
#define MASK_THREAD_CRITICAL ( MASK_THREAD_STANDARD | MASK_CRITICAL )
#define MASK_ALWAYS_FAIL 0x4000
#define	MASK_NO_RESCHED 0x8000

#if defined(__STANDALONE_NANOKERNEL__) || (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
// Context checks disabled: both macros compile away to nothing.
#define CHECK_PRECONDITIONS(mask,function)
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function)

#else
/**
@internalComponent
*/
// Runtime checker (defined elsewhere); aAddr is the caller address used in
// diagnostics.
extern "C" TInt CheckPreconditions(TUint32 aConditionMask, const char* aFunction, TLinAddr aAddr);
/**
@internalComponent
*/
#define CHECK_PRECONDITIONS(mask,function) CheckPreconditions(mask,function,PRECOND_FUNCTION_CALLER)

#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__

/**
@internalComponent
*/
// Fault variant: on failure, print a diagnostic then call NKFault().
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
			__ASSERT_DEBUG( (cond), ( \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\
			NKFault(function, 0)))

#else//!__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
/**
@internalComponent
*/
// Warning variant: on failure, print a diagnostic and continue.
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function) \
			__ASSERT_DEBUG( (cond), \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER))


#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
#endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))

#else//if !DEBUG

// Release builds: no precondition checking at all.
#define CHECK_PRECONDITIONS(mask,function)
#define __ASSERT_WITH_MESSAGE_DEBUG(cond,message,function )

#endif//_DEBUG
sl@0
   860
sl@0
   861
#if (!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))
// Context checks disabled: the always-on assert compiles away to nothing.
#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function )
#else
#ifdef __KERNEL_APIS_CONTEXT_CHECKS_FAULT__
/**
@internalComponent
*/
// Fault variant (active in all builds, via __ASSERT_ALWAYS): on failure,
// print a diagnostic then call NKFault().
#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
			__ASSERT_ALWAYS( (cond), ( \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER),\
			NKFault(function, 0)))
#else
/**
@internalComponent
*/
// Warning variant (active in all builds, via __ASSERT_ALWAYS): on failure,
// print a diagnostic and continue.
#define __ASSERT_WITH_MESSAGE_ALWAYS(cond,message,function) \
			__ASSERT_ALWAYS( (cond), \
			DEBUGPRINT("Assertion failed: %s\nFunction: %s; called from: %08x\n",message,function,PRECOND_FUNCTION_CALLER))
#endif//__KERNEL_APIS_CONTEXT_CHECKS_FAULT__
#endif//(!defined (__KERNEL_APIS_CONTEXT_CHECKS_WARNING__)&&!defined (__KERNEL_APIS_CONTEXT_CHECKS_FAULT__))

#endif	// __NK_PRIV_H__