os/kernelhwsrv/kernel/eka/nkern/nkern.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\nkern\nkern.cpp
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
// NThreadBase member data
sl@0
    19
#define __INCLUDE_NTHREADBASE_DEFINES__
sl@0
    20
sl@0
    21
#include "nk_priv.h"
sl@0
    22
sl@0
    23
/******************************************************************************
sl@0
    24
 * Fast mutex
sl@0
    25
 ******************************************************************************/
sl@0
    26
sl@0
    27
/** Checks if the current thread holds this fast mutex
sl@0
    28
sl@0
    29
	@return TRUE if the current thread holds this fast mutex
sl@0
    30
	@return FALSE if not
sl@0
    31
*/
sl@0
    32
EXPORT_C TBool NFastMutex::HeldByCurrentThread()
	{
	// TRUE iff the holder recorded in this mutex is the thread running now.
	return NCurrentThread() == iHoldingThread;
	}
sl@0
    36
sl@0
    37
/** Find the fast mutex held by the current thread
sl@0
    38
sl@0
    39
	@return a pointer to the fast mutex held by the current thread
sl@0
    40
	@return NULL if the current thread does not hold a fast mutex
sl@0
    41
*/
sl@0
    42
EXPORT_C NFastMutex* NKern::HeldFastMutex()
	{
	// The scheduler tracks the running thread; report the fast mutex it
	// currently holds (NULL if none).
	NThreadBase* pC = TheScheduler.iCurrentThread;
	return pC->iHeldFastMutex;
	}
sl@0
    46
sl@0
    47
sl@0
    48
#ifndef __FAST_MUTEX_MACHINE_CODED__
sl@0
    49
/** Acquires the fast mutex.
sl@0
    50
sl@0
    51
    This will block until the mutex is available, and causes
sl@0
    52
	the thread to enter an implicit critical section until the mutex is released.
sl@0
    53
sl@0
    54
	Generally threads would use NKern::FMWait() which manipulates the kernel lock
sl@0
    55
	for you.
sl@0
    56
	
sl@0
    57
	@pre Kernel must be locked, with lock count 1.
sl@0
    58
	@pre The calling thread holds no fast mutexes.
sl@0
    59
	
sl@0
    60
	@post Kernel is locked, with lock count 1.
sl@0
    61
	@post The calling thread holds the mutex.
sl@0
    62
	
sl@0
    63
	@see NFastMutex::Signal()
sl@0
    64
	@see NKern::FMWait()
sl@0
    65
*/
sl@0
    66
EXPORT_C void NFastMutex::Wait()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait %M",this));
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	if (iHoldingThread)
		{
		// Mutex already held: flag contention and hand the CPU directly to
		// the holder so it can run on and release the mutex.
		iWaiting=1;
		pC->iWaitFastMutex=this;
		__KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait: YieldTo %T",iHoldingThread));
		TheScheduler.YieldTo(iHoldingThread);	// returns with kernel unlocked, interrupts disabled
		TheScheduler.iKernCSLocked = 1;	// relock kernel
		NKern::EnableAllInterrupts();
		pC->iWaitFastMutex=NULL;
		}
	pC->iHeldFastMutex=this;		// automatically puts thread into critical section
#ifdef BTRACE_FAST_MUTEX
	BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexWait,this);
#endif
	// Claim the mutex; done last so the holder field is only set once the
	// current thread is fully set up as the owner.
	iHoldingThread=pC;
	}
sl@0
    87
sl@0
    88
sl@0
    89
/** Releases a previously acquired fast mutex.
sl@0
    90
	
sl@0
    91
	Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
sl@0
    92
	for you.
sl@0
    93
	
sl@0
    94
	@pre The calling thread holds the mutex.
sl@0
    95
	@pre Kernel must be locked.
sl@0
    96
	
sl@0
    97
	@post Kernel is locked.
sl@0
    98
	
sl@0
    99
	@see NFastMutex::Wait()
sl@0
   100
	@see NKern::FMSignal()
sl@0
   101
*/
sl@0
   102
EXPORT_C void NFastMutex::Signal()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("FMSignal %M",this));
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	__ASSERT_WITH_MESSAGE_DEBUG(pC->iHeldFastMutex==this,"The calling thread holds the mutex","NFastMutex::Signal");
#ifdef BTRACE_FAST_MUTEX
	BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,this);
#endif
	// Drop ownership on both sides (mutex and thread), then consume the
	// contention flag.
	iHoldingThread=NULL;
	pC->iHeldFastMutex=NULL;
	TBool w=iWaiting;
	iWaiting=0;
	if (w)
		{
		// Someone was waiting: force a reschedule so the waiter can claim the
		// mutex. Releasing the mutex also ends the implicit critical section,
		// so run any action that was deferred while the mutex was held.
		RescheduleNeeded();
		if (pC->iCsFunction && !pC->iCsCount)
			pC->DoCsFunction();
		}
	}
sl@0
   122
sl@0
   123
sl@0
   124
/** Acquires a fast mutex.
sl@0
   125
sl@0
   126
    This will block until the mutex is available, and causes
sl@0
   127
	the thread to enter an implicit critical section until the mutex is released.
sl@0
   128
sl@0
   129
	@param aMutex The fast mutex to acquire.
sl@0
   130
	
sl@0
   131
	@post The calling thread holds the mutex.
sl@0
   132
	
sl@0
   133
	@see NFastMutex::Wait()
sl@0
   134
	@see NKern::FMSignal()
sl@0
   135
sl@0
   136
	@pre No fast mutex can be held.
sl@0
   137
	@pre Call in a thread context.
sl@0
   138
	@pre Kernel must be unlocked
sl@0
   139
	@pre interrupts enabled
sl@0
   140
sl@0
   141
*/
sl@0
   142
EXPORT_C void NKern::FMWait(NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait");
	// NFastMutex::Wait() requires the kernel lock held exactly once; bracket
	// the raw wait with Lock()/Unlock() on the caller's behalf.
	NKern::Lock();
	aMutex->Wait();
	NKern::Unlock();
	}
sl@0
   149
sl@0
   150
sl@0
   151
/** Releases a previously acquired fast mutex.
sl@0
   152
	
sl@0
   153
	@param aMutex The fast mutex to release.
sl@0
   154
	
sl@0
   155
	@pre The calling thread holds the mutex.
sl@0
   156
	
sl@0
   157
	@see NFastMutex::Signal()
sl@0
   158
	@see NKern::FMWait()
sl@0
   159
*/
sl@0
   160
EXPORT_C void NKern::FMSignal(NFastMutex* aMutex)
	{
	// NFastMutex::Signal() requires the kernel lock; any reschedule it
	// requests takes effect at NKern::Unlock().
	NKern::Lock();
	aMutex->Signal();
	NKern::Unlock();
	}
sl@0
   166
sl@0
   167
sl@0
   168
/** Acquires the System Lock.
sl@0
   169
sl@0
   170
    This will block until the mutex is available, and causes
sl@0
   171
	the thread to enter an implicit critical section until the mutex is released.
sl@0
   172
sl@0
   173
	@post System lock is held.
sl@0
   174
sl@0
   175
	@see NKern::UnlockSystem()
sl@0
   176
	@see NKern::FMWait()
sl@0
   177
sl@0
   178
	@pre No fast mutex can be held.
sl@0
   179
	@pre Call in a thread context.
sl@0
   180
	@pre Kernel must be unlocked
sl@0
   181
	@pre interrupts enabled
sl@0
   182
*/
sl@0
   183
EXPORT_C void NKern::LockSystem()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::LockSystem");
	// The System Lock is the scheduler's fast mutex; acquire it under the
	// kernel lock as NFastMutex::Wait() requires.
	NKern::Lock();
	TheScheduler.iLock.Wait();
	NKern::Unlock();
	}
sl@0
   190
sl@0
   191
sl@0
   192
/** Releases the System Lock.
sl@0
   193
sl@0
   194
	@pre System lock must be held.
sl@0
   195
sl@0
   196
	@see NKern::LockSystem()
sl@0
   197
	@see NKern::FMSignal()
sl@0
   198
*/
sl@0
   199
EXPORT_C void NKern::UnlockSystem()
	{
	// Release the scheduler's fast mutex (the System Lock) under the kernel
	// lock; any pending reschedule happens at NKern::Unlock().
	NKern::Lock();
	TheScheduler.iLock.Signal();
	NKern::Unlock();
	}
sl@0
   205
sl@0
   206
sl@0
   207
/** Temporarily releases a fast mutex if there is contention.
sl@0
   208
sl@0
   209
    If there is another thread attempting to acquire the mutex, the calling
sl@0
   210
	thread releases the mutex and then acquires it again.
sl@0
   211
	
sl@0
   212
	This is more efficient than the equivalent code:
sl@0
   213
	
sl@0
   214
	@code
sl@0
   215
	NKern::FMSignal();
sl@0
   216
	NKern::FMWait();
sl@0
   217
	@endcode
sl@0
   218
sl@0
   219
	@return	TRUE if the mutex was relinquished, FALSE if not.
sl@0
   220
sl@0
   221
	@pre	The mutex must be held.
sl@0
   222
sl@0
   223
	@post	The mutex is held.
sl@0
   224
*/
sl@0
   225
EXPORT_C TBool NKern::FMFlash(NFastMutex* aM)
	{
	__ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash");
	// Snapshot the contention flag. It is only ever set by a thread blocked
	// on this mutex, so FALSE means nobody is waiting and the mutex need not
	// be relinquished at all.
	TBool w = aM->iWaiting;
	if (w)
		{
		NKern::Lock();
		aM->Signal();
		NKern::PreemptionPoint();	// give the waiter a chance to run now
		aM->Wait();
		NKern::Unlock();
		}
#ifdef BTRACE_FAST_MUTEX
	else
		{
		// Trace the uncontended case too, so flash points remain visible.
		BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexFlash,aM);
		}
#endif
	return w;
	}
sl@0
   245
sl@0
   246
sl@0
   247
/** Temporarily releases the System Lock if there is contention.
sl@0
   248
sl@0
   249
    If there
sl@0
   250
	is another thread attempting to acquire the System lock, the calling
sl@0
   251
	thread releases the mutex and then acquires it again.
sl@0
   252
	
sl@0
   253
	This is more efficient than the equivalent code:
sl@0
   254
	
sl@0
   255
	@code
sl@0
   256
	NKern::UnlockSystem();
sl@0
   257
	NKern::LockSystem();
sl@0
   258
	@endcode
sl@0
   259
sl@0
   260
	Note that this can only allow higher priority threads to use the System
sl@0
   261
	lock as lower priority cannot cause contention on a fast mutex.
sl@0
   262
sl@0
   263
	@return	TRUE if the system lock was relinquished, FALSE if not.
sl@0
   264
sl@0
   265
	@pre	System lock must be held.
sl@0
   266
sl@0
   267
	@post	System lock is held.
sl@0
   268
sl@0
   269
	@see NKern::LockSystem()
sl@0
   270
	@see NKern::UnlockSystem()
sl@0
   271
*/
sl@0
   272
EXPORT_C TBool NKern::FlashSystem()
	{
	// The System Lock is the scheduler's fast mutex; delegate to FMFlash().
	return NKern::FMFlash(&TheScheduler.iLock);
	}
sl@0
   276
#endif
sl@0
   277
sl@0
   278
sl@0
   279
/******************************************************************************
sl@0
   280
 * Fast semaphore
sl@0
   281
 ******************************************************************************/
sl@0
   282
sl@0
   283
/** Sets the owner of a fast semaphore.
sl@0
   284
sl@0
   285
	@param aThread The thread to own this semaphore. If aThread==0, then the
sl@0
   286
					owner is set to the current thread.
sl@0
   287
sl@0
   288
	@pre Kernel must be locked.
sl@0
   289
	@pre If changing ownership from one thread to another, there must be no
sl@0
   290
		 pending signals or waits.
sl@0
   291
	@pre Call either in a thread or an IDFC context.
sl@0
   292
	
sl@0
   293
	@post Kernel is locked.
sl@0
   294
*/
sl@0
   295
EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner");
	// A NULL argument means "the current thread".
	NThreadBase* newOwner = aThread ? aThread : TheScheduler.iCurrentThread;
	// Ownership may only move between distinct threads while there are no
	// outstanding signals or waits (iCount must be zero).
	if (iOwningThread && iOwningThread!=newOwner)
		{
		__NK_ASSERT_ALWAYS(!iCount);
		}
	iOwningThread = newOwner;
	}
sl@0
   306
sl@0
   307
sl@0
   308
#ifndef __FAST_SEM_MACHINE_CODED__
sl@0
   309
/** Waits on a fast semaphore.
sl@0
   310
sl@0
   311
    Decrements the signal count for the semaphore and
sl@0
   312
	removes the calling thread from the ready-list if the semaphore becomes
sl@0
   313
	unsignalled. Only the thread that owns a fast semaphore can wait on it.
sl@0
   314
	
sl@0
   315
	Note that this function does not block, it merely updates the NThread state,
sl@0
   316
	rescheduling will only occur when the kernel is unlocked. Generally threads
sl@0
   317
	would use NKern::FSWait() which manipulates the kernel lock for you.
sl@0
   318
sl@0
   319
	@pre The calling thread must own the semaphore.
sl@0
   320
	@pre No fast mutex can be held.
sl@0
   321
	@pre Kernel must be locked.
sl@0
   322
	
sl@0
   323
	@post Kernel is locked.
sl@0
   324
	
sl@0
   325
	@see NFastSemaphore::Signal()
sl@0
   326
	@see NKern::FSWait()
sl@0
   327
	@see NKern::Unlock()
sl@0
   328
 */
sl@0
   329
EXPORT_C void NFastSemaphore::Wait()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	__ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait");
	if (--iCount<0)
		{
		// Semaphore now unsignalled: mark the thread as waiting and take it
		// off the ready list. The actual context switch is deferred until the
		// kernel is unlocked.
		pC->iNState=NThread::EWaitFastSemaphore;
		pC->iWaitObj=this;
		TheScheduler.Remove(pC);
		RescheduleNeeded();
		}
	}
sl@0
   342
sl@0
   343
sl@0
   344
/** Signals a fast semaphore.
sl@0
   345
sl@0
   346
    Increments the signal count of a fast semaphore by
sl@0
   347
	one and releases any waiting thread if the semaphore becomes signalled.
sl@0
   348
	
sl@0
   349
	Note that a reschedule will not occur before this function returns, this will
sl@0
   350
	only take place when the kernel is unlocked. Generally threads
sl@0
   351
	would use NKern::FSSignal() which manipulates the kernel lock for you.
sl@0
   352
	
sl@0
   353
	@pre Kernel must be locked.
sl@0
   354
	@pre Call either in a thread or an IDFC context.
sl@0
   355
	
sl@0
   356
	@post Kernel is locked.
sl@0
   357
	
sl@0
   358
	@see NFastSemaphore::Wait()
sl@0
   359
	@see NKern::FSSignal()
sl@0
   360
	@see NKern::Unlock()
sl@0
   361
 */
sl@0
   362
EXPORT_C void NFastSemaphore::Signal()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal");
	if (++iCount<=0)
		{
		// Count was negative, so the owning thread is blocked on this
		// semaphore: clear its wait object and make it ready again (unless it
		// is explicitly suspended).
		iOwningThread->iWaitObj=NULL;
		iOwningThread->CheckSuspendThenReady();
		}
	}
sl@0
   371
sl@0
   372
sl@0
   373
/** Signals a fast semaphore multiple times.
sl@0
   374
sl@0
   375
	@pre Kernel must be locked.
sl@0
   376
	@pre Call either in a thread or an IDFC context.
sl@0
   377
	
sl@0
   378
	@post Kernel is locked.
sl@0
   379
sl@0
   380
	@internalComponent	
sl@0
   381
 */
sl@0
   382
EXPORT_C void NFastSemaphore::SignalN(TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN");
	__NK_ASSERT_DEBUG(aCount>=0);
	// Only the single owning thread can ever be blocked on a fast semaphore,
	// so one wakeup suffices regardless of how many signals are added.
	if (aCount>0 && iCount<0)
		{
		iOwningThread->iWaitObj=NULL;
		iOwningThread->CheckSuspendThenReady();
		}
	iCount+=aCount;
	}
sl@0
   393
sl@0
   394
sl@0
   395
/** Resets a fast semaphore.
sl@0
   396
sl@0
   397
	@pre Kernel must be locked.
sl@0
   398
	@pre Call either in a thread or an IDFC context.
sl@0
   399
	
sl@0
   400
	@post Kernel is locked.
sl@0
   401
sl@0
   402
	@internalComponent	
sl@0
   403
 */
sl@0
   404
EXPORT_C void NFastSemaphore::Reset()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset");
	if (iCount<0)
		{
		// Owner is blocked on the semaphore: release it before zeroing the
		// count.
		iOwningThread->iWaitObj=NULL;
		iOwningThread->CheckSuspendThenReady();
		}
	iCount=0;
	}
sl@0
   414
sl@0
   415
sl@0
   416
/** Cancels a wait on a fast semaphore.
sl@0
   417
sl@0
   418
	@pre Kernel must be locked.
sl@0
   419
	@pre Call either in a thread or an IDFC context.
sl@0
   420
	
sl@0
   421
	@post Kernel is locked.
sl@0
   422
sl@0
   423
	@internalComponent	
sl@0
   424
 */
sl@0
   425
void NFastSemaphore::WaitCancel()
sl@0
   426
	{
sl@0
   427
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::WaitCancel");			
sl@0
   428
	iCount=0;
sl@0
   429
	iOwningThread->iWaitObj=NULL;
sl@0
   430
	iOwningThread->CheckSuspendThenReady();
sl@0
   431
	}
sl@0
   432
sl@0
   433
sl@0
   434
/** Waits for a signal on the current thread's I/O semaphore.
sl@0
   435
sl@0
   436
	@pre No fast mutex can be held.
sl@0
   437
	@pre Call in a thread context.
sl@0
   438
	@pre Kernel must be unlocked
sl@0
   439
	@pre interrupts enabled
sl@0
   440
 */
sl@0
   441
EXPORT_C void NKern::WaitForAnyRequest()
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR"));
	// Wait on the current thread's own request semaphore, under the kernel
	// lock as NFastSemaphore::Wait() requires.
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	pC->iRequestSemaphore.Wait();
	NKern::Unlock();
	}
sl@0
   450
#endif
sl@0
   451
sl@0
   452
sl@0
   453
/** Sets the owner of a fast semaphore.
sl@0
   454
sl@0
   455
	@param aSem The semaphore to change ownership off.
sl@0
   456
	@param aThread The thread to own this semaphore. If aThread==0, then the
sl@0
   457
					owner is set to the current thread.
sl@0
   458
sl@0
   459
	@pre If changing ownership from one thread to another, there must be no
sl@0
   460
		 pending signals or waits.
sl@0
   461
*/
sl@0
   462
EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread));
	// Take the kernel lock, as NFastSemaphore::SetOwner() requires.
	NKern::Lock();
	aSem->SetOwner(aThread);
	NKern::Unlock();
	}
sl@0
   469
sl@0
   470
/** Waits on a fast semaphore.
sl@0
   471
sl@0
   472
    Decrements the signal count for the semaphore
sl@0
   473
	and waits for a signal if the semaphore becomes unsignalled. Only the
sl@0
   474
	thread that owns a fast	semaphore can wait on it.
sl@0
   475
sl@0
   476
	@param aSem The semaphore to wait on.
sl@0
   477
	
sl@0
   478
	@pre The calling thread must own the semaphore.
sl@0
   479
	@pre No fast mutex can be held.
sl@0
   480
	
sl@0
   481
	@see NFastSemaphore::Wait()
sl@0
   482
*/
sl@0
   483
EXPORT_C void NKern::FSWait(NFastSemaphore* aSem)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSWait %m",aSem));
	// NFastSemaphore::Wait() requires the kernel lock; any blocking happens
	// when the kernel is unlocked again.
	NKern::Lock();
	aSem->Wait();
	NKern::Unlock();
	}
sl@0
   490
sl@0
   491
sl@0
   492
/** Signals a fast semaphore.
sl@0
   493
sl@0
   494
    Increments the signal count of a fast semaphore
sl@0
   495
	by one and releases any waiting thread if the semaphore becomes signalled.
sl@0
   496
	
sl@0
   497
	@param aSem The semaphore to signal.
sl@0
   498
sl@0
   499
	@see NKern::FSWait()
sl@0
   500
sl@0
   501
	@pre Interrupts must be enabled.
sl@0
   502
	@pre Do not call from an ISR
sl@0
   503
 */
sl@0
   504
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m",aSem));
	// Signal under the kernel lock; any resulting reschedule takes effect at
	// NKern::Unlock().
	NKern::Lock();
	aSem->Signal();
	NKern::Unlock();
	}
sl@0
   512
sl@0
   513
sl@0
   514
/** Atomically signals a fast semaphore and releases a fast mutex.
sl@0
   515
sl@0
   516
	Rescheduling only occurs after both synchronisation operations are complete.
sl@0
   517
	
sl@0
   518
	@param aSem The semaphore to signal.
sl@0
   519
	@param aMutex The mutex to release. If NULL, the System Lock is released
sl@0
   520
sl@0
   521
	@pre The calling thread must hold the mutex.
sl@0
   522
	
sl@0
   523
	@see NKern::FMSignal()
sl@0
   524
 */
sl@0
   525
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex)
	{
	if (!aMutex)
		aMutex=&TheScheduler.iLock;	// NULL means the System Lock
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m +FM %M",aSem,aMutex));
	// Both operations happen under one kernel lock, so no reschedule can
	// intervene between the semaphore signal and the mutex release.
	NKern::Lock();
	aSem->Signal();
	aMutex->Signal();
	NKern::Unlock();
	}
sl@0
   535
sl@0
   536
sl@0
   537
/** Signals a fast semaphore multiple times.
sl@0
   538
sl@0
   539
    Increments the signal count of a
sl@0
   540
	fast semaphore by aCount and releases any waiting thread if the semaphore
sl@0
   541
	becomes signalled.
sl@0
   542
	
sl@0
   543
	@param aSem The semaphore to signal.
sl@0
   544
	@param aCount The number of times to signal the semaphore.
sl@0
   545
sl@0
   546
	@see NKern::FSWait()
sl@0
   547
sl@0
   548
	@pre Interrupts must be enabled.
sl@0
   549
	@pre Do not call from an ISR
sl@0
   550
 */
sl@0
   551
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d",aSem,aCount));
	// Signal aCount times under the kernel lock; any resulting reschedule
	// takes effect at NKern::Unlock().
	NKern::Lock();
	aSem->SignalN(aCount);
	NKern::Unlock();
	}
sl@0
   559
sl@0
   560
sl@0
   561
/** Atomically signals a fast semaphore multiple times and releases a fast mutex.
sl@0
   562
sl@0
   563
	Rescheduling only occurs after both synchronisation operations are complete.
sl@0
   564
	
sl@0
   565
	@param aSem The semaphore to signal.
sl@0
   566
	@param aCount The number of times to signal the semaphore.
sl@0
   567
	@param aMutex The mutex to release. If NULL, the System Lock is released.
sl@0
   568
sl@0
   569
	@pre The calling thread must hold the mutex.
sl@0
   570
	
sl@0
   571
	@see NKern::FMSignal()
sl@0
   572
 */
sl@0
   573
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex)
	{
	if (!aMutex)
		aMutex=&TheScheduler.iLock;	// NULL means the System Lock
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d + FM %M",aSem,aCount,aMutex));
	// One kernel lock spans both operations so they appear atomic to other
	// threads; rescheduling happens only at NKern::Unlock().
	NKern::Lock();
	aSem->SignalN(aCount);
	aMutex->Signal();
	NKern::Unlock();
	}
sl@0
   583
sl@0
   584
sl@0
   585
/******************************************************************************
sl@0
   586
 * Thread
sl@0
   587
 ******************************************************************************/
sl@0
   588
sl@0
   589
#ifndef __SCHEDULER_MACHINE_CODED__
sl@0
   590
/** Makes a nanothread ready provided that it is not explicitly suspended.
sl@0
   591
	
sl@0
   592
	For use by RTOS personality layers.
sl@0
   593
sl@0
   594
	@pre	Kernel must be locked.
sl@0
   595
	@pre	Call either in a thread or an IDFC context.
sl@0
   596
sl@0
   597
	@post	Kernel is locked.
sl@0
   598
 */
sl@0
   599
EXPORT_C void NThreadBase::CheckSuspendThenReady()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::CheckSuspendThenReady");
	if (iSuspendCount!=0)
		{
		// Explicitly suspended: park the thread instead of making it ready.
		iNState=ESuspended;
		return;
		}
	Ready();
	}
sl@0
   607
sl@0
   608
/** Makes a nanothread ready.
sl@0
   609
	
sl@0
   610
	For use by RTOS personality layers.
sl@0
   611
sl@0
   612
	@pre	Kernel must be locked.
sl@0
   613
	@pre	Call either in a thread or an IDFC context.
sl@0
   614
	@pre	The thread being made ready must not be explicitly suspended
sl@0
   615
	
sl@0
   616
	@post	Kernel is locked.
sl@0
   617
 */
sl@0
   618
EXPORT_C void NThreadBase::Ready()
	{
#ifdef _DEBUG
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Ready");
	__ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::Ready");

	// Debug-only option: when KCRAZYSCHEDDELAY is enabled, defer making the
	// thread ready via the delayed queue instead of doing it immediately.
	// Priority-0 threads (idle) and the pre-tick period are exempt.
	if (DEBUGNUM(KCRAZYSCHEDDELAY) && iPriority && TheTimerQ.iMsCount)
		{
		// Delay this thread, unless it's already on the delayed queue
		if ((i_ThrdAttr & KThreadAttDelayed) == 0)
			{
			i_ThrdAttr |= KThreadAttDelayed;
			TheScheduler.iDelayedQ.Add(this);
			}
		}
	else
		{
		// Delayed scheduler off
		// or idle thread, or the tick hasn't started yet
		DoReady();
		}
#else
	DoReady();
#endif
	}
sl@0
   643
sl@0
   644
void NThreadBase::DoReady()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::DoReady");
	__ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::DoReady");

	TScheduler& s=TheScheduler;
	TInt p=iPriority;
//	__KTRACE_OPT(KSCHED,Kern::Printf("Ready(%O), priority %d status %d",this,p,iStatus));
	if (iNState==EDead)
		return;		// dead threads never become ready again
	s.Add(this);
	iNState=EReady;
	if (!(s>p))	// s>p <=> highest ready priority > our priority so no preemption
		{
		// if no other thread at this priority or first thread at this priority has used its timeslice, reschedule
		// note iNext points to first thread at this priority since we got added to the end
		if (iNext==this || ((NThreadBase*)iNext)->iTime==0)
			RescheduleNeeded();
		}
	}
sl@0
   664
#endif
sl@0
   665
sl@0
   666
void NThreadBase::DoCsFunction()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::DoCsFunction %T %d",this,iCsFunction));
	// Consume the deferred action before acting on it: a positive value is a
	// pending suspend count, ECSExitPending means a pending exit, anything
	// else is handed to the RTOS personality layer.
	TInt f=iCsFunction;
	iCsFunction=0;
	if (f>0)
		{
		// suspend this thread f times
		Suspend(f);
		return;
		}
	if (f==ECSExitPending)
		{
		// We need to exit now
		Exit();	// this won't return
		}
	UnknownState(ELeaveCS,f);	// call into RTOS personality
	}
sl@0
   684
sl@0
   685
sl@0
   686
/** Suspends a nanothread the specified number of times.
sl@0
   687
	
sl@0
   688
	For use by RTOS personality layers.
sl@0
   689
	Do not use this function directly on a Symbian OS thread.
sl@0
   690
	Since the kernel is locked on entry, any reschedule will be deferred until
sl@0
   691
	it is unlocked.
sl@0
   692
	The suspension will be deferred if the target thread is currently in a
sl@0
   693
	critical section; in this case the suspension will take effect when it exits
sl@0
   694
	the critical section.
sl@0
   695
	The thread's unknown state handler will be invoked with function ESuspend and
sl@0
   696
	parameter aCount if the current NState is not recognised and it is not in a
sl@0
   697
	critical section.
sl@0
   698
sl@0
   699
	@param	aCount = the number of times to suspend.
sl@0
   700
	@return	TRUE, if the suspension has taken immediate effect;
sl@0
   701
			FALSE, if the thread is in a critical section or is already suspended.
sl@0
   702
	
sl@0
   703
	@pre	Kernel must be locked.
sl@0
   704
	@pre	Call in a thread context.
sl@0
   705
	
sl@0
   706
	@post	Kernel is locked.
sl@0
   707
 */
sl@0
   708
EXPORT_C TBool NThreadBase::Suspend(TInt aCount)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend");
	// If thread is executing a critical section, we must defer the suspend
	if (iNState==EDead)
		return FALSE;		// already dead so suspension is a no-op
	if (iCsCount || iHeldFastMutex)
		{
		__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (CSF %d) %d",this,iCsFunction,aCount));
		if (iCsFunction>=0)			// -ve means thread is about to exit
			{
			iCsFunction+=aCount;	// so thread will suspend itself when it leaves the critical section
			// Holding a fast mutex but no explicit critical section: flag the
			// mutex as contended so its release triggers the deferred action.
			if (iHeldFastMutex && iCsCount==0)
				iHeldFastMutex->iWaiting=1;
			}
		return FALSE;
		}

	// thread not in critical section, so suspend it
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (NState %d) %d",this,iNState,aCount));
	switch (iNState)
		{
		case EReady:
			// Only ready threads must be removed from the ready list; other
			// known states are already not runnable.
			TheScheduler.Remove(this);
			RescheduleNeeded();
			iNState=ESuspended;
			// deliberate fall through
		case EWaitFastSemaphore:
		case EWaitDfc:
		case ESleep:
		case EBlocked:
		case ESuspended:
			break;
		default:
			UnknownState(ESuspend,aCount);
			break;
		}
	TInt old_suspend=iSuspendCount;
	iSuspendCount-=aCount;
	return (old_suspend==0);	// return TRUE if thread has changed from not-suspended to suspended.
	}
sl@0
   748
sl@0
   749
sl@0
   750
/** Resumes a nanothread, cancelling one suspension.
sl@0
   751
	
sl@0
   752
	For use by RTOS personality layers.
sl@0
   753
	Do not use this function directly on a Symbian OS thread.
sl@0
   754
	Since the kernel is locked on entry, any reschedule will be deferred until
sl@0
   755
	it is unlocked.
sl@0
   756
	If the target thread is currently in a critical section this will simply
sl@0
   757
	cancel one deferred suspension.
sl@0
   758
	The thread's unknown state handler will be invoked with function EResume if
sl@0
   759
	the current NState is not recognised and it is not in a	critical section.
sl@0
   760
sl@0
   761
	@return	TRUE, if the resumption has taken immediate effect;
sl@0
   762
			FALSE, if the thread is in a critical section or is still suspended.
sl@0
   763
	
sl@0
   764
	@pre	Kernel must be locked.
sl@0
   765
	@pre	Call either in a thread or an IDFC context.
sl@0
   766
	
sl@0
   767
	@post	Kernel must be locked.
sl@0
   768
 */
sl@0
   769
EXPORT_C TBool NThreadBase::Resume()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Resume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction));
	if (iNState==EDead)
		return FALSE;

	// If thread is in critical section, just cancel deferred suspends
	if (iCsCount || iHeldFastMutex)
		{
		if (iCsFunction>0)
			--iCsFunction;	// one less deferred suspension
		return FALSE;
		}
	// iSuspendCount holds -1 per outstanding suspension; act only when the
	// last one is being cancelled.
	if (iSuspendCount<0 && ++iSuspendCount==0)
		{
		switch (iNState)
			{
			case ESuspended:
				Ready();
				// deliberate fall through
			case EReady:
			case EWaitFastSemaphore:
			case EWaitDfc:
			case ESleep:
			case EBlocked:
				break;
			default:
				UnknownState(EResume,0);
				break;
			}
		return TRUE;	// thread has changed from suspended to not-suspended
		}
	return FALSE;	// still suspended or not initially suspended so no higher level action required
	}
sl@0
   803
sl@0
   804
sl@0
   805
/** Resumes a nanothread, cancelling all outstanding suspensions.
	
	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.
	Since the kernel is locked on entry, any reschedule will be deferred until
	it is unlocked.
	If the target thread is currently in a critical section this will simply
	cancel all deferred suspensions.
	The thread's unknown state handler will be invoked with function EForceResume
	if the current NState is not recognised and it is not in a	critical section.

	@return	TRUE, if the resumption has taken immediate effect;
			FALSE, if the thread is in a critical section.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.

	@post	Kernel is locked.
 */
EXPORT_C TBool NThreadBase::ForceResume()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume");		
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::ForceResume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction));
	if (iNState==EDead)
		return FALSE;	// a dead thread can never be resumed

	// If thread is in critical section (or holds a fast mutex, which acts as
	// an implicit critical section), suspends were deferred, not applied -
	// just cancel all of the deferred suspends.
	if (iCsCount || iHeldFastMutex)
		{
		if (iCsFunction>0)	// >0 counts deferred suspends; <0 means exit pending - leave that alone
			iCsFunction=0;	// cancel all deferred suspensions
		return FALSE;
		}
	if (iSuspendCount<0)	// negative iSuspendCount means the thread is suspended
		{
		iSuspendCount=0;	// cancel every outstanding suspension in one go
		switch (iNState)
			{
			case ESuspended:
				Ready();
				// deliberate fall-through: once made ready, nothing more to do
			case EReady:
			case EWaitFastSemaphore:
			case EWaitDfc:
			case ESleep:
			case EBlocked:
			case EDead:
				break;
			default:
				// state defined by a personality layer - delegate to its handler
				UnknownState(EForceResume,0);
				break;
			}
		}
	return TRUE;
	}
sl@0
   859
sl@0
   860
sl@0
   861
/** Releases a waiting nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.
	This function should make the thread ready (provided it is not explicitly
	suspended) and cancel any wait timeout. It should also remove it from any
	wait queues.
	If aReturnCode is nonnegative it indicates normal completion of the wait.
	If aReturnCode is negative it indicates early/abnormal completion of the
	wait and so any wait object should be reverted as if the wait had never
	occurred (eg semaphore count should be incremented as this thread has not
	actually acquired the semaphore).
	The thread's unknown state handler will be invoked with function ERelease
	and parameter aReturnCode if the current NState is not recognised.
	
	@param aReturnCode	The reason code for release.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.
	
	@post	Kernel is locked.
 */
EXPORT_C void NThreadBase::Release(TInt aReturnCode)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release");		
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Release %T, state %d retcode %d",this,iNState,aReturnCode));
	switch(iNState)
		{
		case EDead:
			return;		// dead thread - nothing to release
		case EReady:
		case ESuspended:
			// don't release explicit suspensions
			break;
		case EWaitFastSemaphore:
			// negative return code = abnormal completion, so undo the wait
			// (restore the semaphore count this thread never really took)
			if (aReturnCode<0 && iWaitObj)
				((NFastSemaphore*)iWaitObj)->WaitCancel();
			break;
		case ESleep:
		case EBlocked:
		case EWaitDfc:
			// wake the thread unless a suspension is pending
			CheckSuspendThenReady();
			break;
		default:
			// state defined by a personality layer - delegate to its handler
			UnknownState(ERelease,aReturnCode);
			break;
		}
	// iUserFlags nonzero means a wait timeout timer was started for this wait
	if (iTimer.iUserFlags)
		{
		if (iTimer.iState == NTimer::EIdle)
			{
			// Potential race condition - timer must have completed but expiry
			// handler has not yet run. Signal to the handler that it should do
			// nothing by flipping the bottom bit of iTimer.iPtr
			// This condition cannot possibly recur until the expiry handler has
			// run since all expiry handlers run in DfcThread1.
			TLinAddr& x = *(TLinAddr*)&iTimer.iPtr;
			x ^= 1;
			}
		iTimer.Cancel();
		iTimer.iUserFlags = FALSE;
		}
	iWaitObj=NULL;
	iReturnValue=aReturnCode;
	}
sl@0
   926
sl@0
   927
sl@0
   928
/** Signals a nanokernel thread's request semaphore.

	This can also be used on Symbian OS threads.
	
	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.
	
	@post	Kernel is locked.
 */
EXPORT_C void NThreadBase::RequestSignal()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal");		
	// Simply signal the thread's built-in request semaphore; this wakes the
	// thread if it is waiting in WaitForAnyRequest().
	iRequestSemaphore.Signal();
	}
sl@0
   942
sl@0
   943
// Expiry handler for the thread's wait timeout timer (runs in DfcThread1).
// aPtr is a cookie: the NThread pointer with its bottom bits borrowed as a
// generation flag - Release() flips bit 0 to tell us the timeout was cancelled
// after expiry but before we ran (see NThreadBase::Release above).
void NThreadBase::TimerExpired(TAny* aPtr)
	{
	TLinAddr cookie = (TLinAddr)aPtr;
	NThread* pT = (NThread*)(cookie &~ 3);	// strip flag bits to recover the thread pointer
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::TimerExpired %T, state %d",pT,pT->iNState));
	NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler;
	NKern::Lock();
	// Only use the personality layer's timeout handler for unknown states or
	// EBlocked; standard nanokernel states are handled directly below.
	if (pT->iNState<ENumNStates && pT->iNState!=EBlocked)
		th = NULL;
	if (th)
		{
		// Use higher level timeout handler
		NKern::Unlock();
		(*th)(pT, ETimeoutPreamble);
		TInt param = ETimeoutPostamble;
		NKern::Lock();
		TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr;
		if ((cookie ^ current_cookie) & 1)
			{
			// The timer was cancelled just after expiring but before this function
			// managed to call NKern::Lock(), so it's spurious
			param = ETimeoutSpurious;
			}
		else
			pT->iTimer.iUserFlags = FALSE;
		NKern::Unlock();
		(*th)(pT, param);
		return;
		}
	TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr;
	if ((cookie ^ current_cookie) & 1)
		{
		// The timer was cancelled just after expiring but before this function
		// managed to call NKern::Lock(), so just return without doing anything.
		NKern::Unlock();
		return;
		}
	pT->iTimer.iUserFlags = FALSE;
	switch(pT->iNState)
		{
		case EDead:
		case EReady:
		case ESuspended:
			// nothing to time out - thread is not waiting
			NKern::Unlock();
			return;
		case EWaitFastSemaphore:
			// abnormal completion - revert the semaphore wait
			((NFastSemaphore*)pT->iWaitObj)->WaitCancel();
			break;
		case EBlocked:
		case ESleep:
		case EWaitDfc:
			// wake the thread unless a suspension is pending
			pT->CheckSuspendThenReady();
			break;
		default:
			pT->UnknownState(ETimeout,0);
			break;
		}
	pT->iWaitObj=NULL;
	pT->iReturnValue=KErrTimedOut;	// wait completed by timeout
	NKern::Unlock();
	}
sl@0
  1004
sl@0
  1005
sl@0
  1006
/** Changes the priority of a nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.

	The thread's unknown state handler will be invoked with function EChangePriority
	and parameter newp if the current NState is not recognised and the new priority
	is not equal to the original priority.
	
	@param	newp  The new nanokernel priority (0 <= newp < KNumPriorities).

	@pre	Kernel must be locked.
	@pre	Call in a thread context.
	
	@post	Kernel is locked.
 */
EXPORT_C void NThreadBase::SetPriority(TInt newp)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");		
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::SetPriority %T %d->%d, state %d",this,iPriority,newp,iNState));
#ifdef _DEBUG
	// When the crazy scheduler is active, refuse to set any priority higher than 1
	if (KCrazySchedulerEnabled() && newp>1)
		newp=1;
#endif
	if (newp==iPriority)
		return;		// no change - nothing to do
#ifdef BTRACE_THREAD_PRIORITY
	BTrace8(BTrace::EThreadPriority,BTrace::ENThreadPriority,this,newp);
#endif
	switch(iNState)
		{
		case EReady:
			{
			// Thread is on the ready list - move it to the new priority queue
			// and work out whether a reschedule is now required.
			TInt oldp=iPriority;
			TheScheduler.ChangePriority(this,newp);
			NThreadBase* pC=TheScheduler.iCurrentThread;
			if (this==pC)
				{
				// Lowering the running thread's priority: reschedule if any
				// other ready thread now has priority >= ours.
				if (newp<oldp && (TheScheduler>newp || !TPriListLink::Alone()))	// can't have scheduler<newp
					RescheduleNeeded();
				}
			else if (newp>oldp)
				{
				// Raising another thread above (or level with) the current one
				TInt cp=pC->iPriority;
				if (newp>cp)
					RescheduleNeeded();
				else if (newp==cp && pC->iTime==0)
					{
					// equal priority and current thread's timeslice has expired
					if (pC->iHeldFastMutex)
						pC->iHeldFastMutex->iWaiting=1;	// don't round-robin now, wait until fast mutex released
					else
						RescheduleNeeded();
					}
				}
			break;
			}
		case ESuspended:
		case EWaitFastSemaphore:
		case EWaitDfc:
		case ESleep:
		case EBlocked:
		case EDead:
			// Not on the ready list - just record the new priority; it takes
			// effect when the thread next becomes ready.
			iPriority=TUint8(newp);
			break;
		default:
			UnknownState(EChangePriority,newp);
			break;
		}
	}
sl@0
  1076
sl@0
  1077
// Terminates the current thread. Called with the kernel locked exactly once;
// never returns - ends with a reschedule away from the now-dead thread.
// Faults the kernel if invariants are broken (wrong lock count, fast mutex
// still held, or still in a critical section).
void NThreadBase::Exit()
	{
	// The current thread is exiting
	// Enter with kernel locked, don't return
	__NK_ASSERT_DEBUG(this==TheScheduler.iCurrentThread);

	OnExit();	// platform/personality-specific exit hook

	TInt threadCS=iCsCount;
	TInt kernCS=TheScheduler.iKernCSLocked;
	// Enter a critical section marked as "exit in progress" so the thread
	// cannot be suspended or killed again while the exit handler runs.
	iCsCount=1;
	iCsFunction=ECSExitInProgress;
	NKern::Unlock();
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount()));
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Exit %T, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS));
	// Sanity checks: kernel must have been locked exactly once, and the thread
	// must not hold a fast mutex or be in a (real) critical section.
	if (kernCS!=1)
		FAULT();
	if (iHeldFastMutex)
		FAULT();
	if (threadCS)
		FAULT();
	TDfc* pD=NULL;
	NThreadExitHandler xh = iHandlers->iExitHandler;
	if (xh)
		pD=(*xh)((NThread*)this);		// call exit handler
	NKern::Lock();
	// Queue the cleanup DFC (if any) before removing ourselves; it runs after
	// this thread is safely off the scheduler.
	if (pD)
		pD->DoEnque();
	iNState=EDead;
	TheScheduler.Remove(this);
	RescheduleNeeded();
#ifdef BTRACE_THREAD_IDENTIFICATION
	BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this);
#endif
	__NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress);
	TScheduler::Reschedule();	// this won't return
	FAULT();
	}
sl@0
  1115
sl@0
  1116
sl@0
  1117
/** Kills a nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.

	When acting on the calling thread, causes the calling thread to exit.

	When acting on another thread, causes that thread to exit unless it is
	currently in a critical section. In this case the thread is marked as
	"exit pending" and will exit as soon as it leaves the critical section.

	In either case the exiting thread first invokes its exit handler (if it
	exists). The handler runs with preemption enabled and with the thread in a
	critical section so that it may not be suspended or killed again. The
	handler may return a pointer to a TDfc, which will be enqueued just before
	the thread finally terminates (after the kernel has been relocked). This DFC
	will therefore execute once the NThread has been safely removed from the
	scheduler and is intended to be used to cleanup the NThread object and any
	associated personality layer resources.
	
	@pre	Kernel must be locked.
	@pre	Call in a thread context.
	@pre	If acting on calling thread, calling thread must not be in a
			critical section; if it is the kernel will fault. Also, the kernel
			must be locked exactly once (iKernCSLocked = 1).
	
	@post	Kernel is locked, if not acting on calling thread.
	@post	Does not return if it acts on the calling thread.
 */
EXPORT_C void NThreadBase::Kill()
	{
	// Kill a thread
	// Enter with kernel locked
	// Exit with kernel locked if not current thread, otherwise does not return
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T, state %d CSC %d HeldFM %M",this,iNState,iCsCount,iHeldFastMutex));
	OnKill(); // platform-specific hook
	NThreadBase* pC=TheScheduler.iCurrentThread;
	if (this==pC)
		{
		// Suicide: fault if already in the middle of exiting, else exit now.
		if (iCsFunction==ECSExitInProgress)
			FAULT();
		Exit();				// this will not return
		}
	// Target is another thread. If it is in a critical section (or holds a
	// fast mutex), defer the kill until it leaves.
	if (iCsCount || iHeldFastMutex)
		{
		__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T deferred",this));
		if (iCsFunction<0)
			return;			// thread is already exiting
		iCsFunction=ECSExitPending;		// zap any suspensions pending
		// Holding a fast mutex with no explicit CS: flag the mutex so the
		// deferred exit is processed as soon as the mutex is signalled.
		if (iHeldFastMutex && iCsCount==0)
			iHeldFastMutex->iWaiting=1;
		return;
		}

	// thread is not in critical section
	// make the thread divert to Exit() when it next runs
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill diverting %T",this));
	Release(KErrDied);		// cancel any waits on semaphores etc.
	ForceResume();			// release any suspensions
	iWaitFastMutex=NULL;	// if thread was waiting for a fast mutex it needn't bother
	iCsCount=1;				// stop anyone suspending the thread
	iCsFunction=ECSExitPending;
	ForceExit();			// get thread to call Exit when it is next scheduled
	}
sl@0
  1182
sl@0
  1183
sl@0
  1184
/** Suspends the execution of a thread.
sl@0
  1185
sl@0
  1186
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1187
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend().
sl@0
  1188
sl@0
  1189
    If the thread is in a critical section or holds a fast mutex, the suspension will
sl@0
  1190
    be deferred until the thread leaves the critical section or signals the fast mutex.
sl@0
  1191
    Otherwise the thread will be suspended with immediate effect. If the thread it's
sl@0
  1192
    running, the execution of the thread will be suspended and a reschedule will occur.
sl@0
  1193
sl@0
  1194
    @param aThread Thread to be suspended.
sl@0
  1195
    @param aCount  Number of times to suspend this thread.
sl@0
  1196
    
sl@0
  1197
    @return TRUE, if the thread had changed the state from non-suspended to suspended;
sl@0
  1198
	        FALSE, otherwise.
sl@0
  1199
	     
sl@0
  1200
	@see Kern::ThreadSuspend()
sl@0
  1201
*/
sl@0
  1202
EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount)
sl@0
  1203
	{	
sl@0
  1204
	NKern::Lock();
sl@0
  1205
	TBool r=aThread->Suspend(aCount);
sl@0
  1206
	NKern::Unlock();
sl@0
  1207
	return r;
sl@0
  1208
	}
sl@0
  1209
sl@0
  1210
sl@0
  1211
/** Resumes the execution of a thread.
sl@0
  1212
sl@0
  1213
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1214
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
sl@0
  1215
sl@0
  1216
    This function resumes the thread once. If the thread was suspended more than once
sl@0
  1217
    the thread will remain suspended.
sl@0
  1218
    If the thread is in a critical section, this function will decrease the number of
sl@0
  1219
    deferred suspensions.
sl@0
  1220
sl@0
  1221
    @param aThread Thread to be resumed.
sl@0
  1222
    
sl@0
  1223
    @return TRUE, if the thread had changed the state from suspended to non-suspended;
sl@0
  1224
            FALSE, otherwise.
sl@0
  1225
            
sl@0
  1226
	@see Kern::ThreadResume()
sl@0
  1227
*/
sl@0
  1228
EXPORT_C TBool NKern::ThreadResume(NThread* aThread)
sl@0
  1229
	{	
sl@0
  1230
	NKern::Lock();
sl@0
  1231
	TBool r=aThread->Resume();
sl@0
  1232
	NKern::Unlock();
sl@0
  1233
	return r;
sl@0
  1234
	}
sl@0
  1235
sl@0
  1236
sl@0
  1237
/** Resumes the execution of a thread and signals a mutex.
sl@0
  1238
sl@0
  1239
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1240
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
sl@0
  1241
sl@0
  1242
    This function resumes the thread once. If the thread was suspended more than once
sl@0
  1243
    the thread will remain suspended.
sl@0
  1244
    If the thread is in a critical section, this function will decrease the number of
sl@0
  1245
    deferred suspensions.
sl@0
  1246
sl@0
  1247
    @param aThread Thread to be resumed.
sl@0
  1248
    @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
sl@0
  1249
sl@0
  1250
    @return TRUE, if the thread had changed the state from suspended to non-suspended;
sl@0
  1251
            FALSE, otherwise.
sl@0
  1252
           
sl@0
  1253
	@see Kern::ThreadResume()
sl@0
  1254
*/
sl@0
  1255
EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex)
sl@0
  1256
	{
sl@0
  1257
	if (!aMutex)
sl@0
  1258
		aMutex=&TheScheduler.iLock;
sl@0
  1259
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadResume %T + FM %M",aThread,aMutex));
sl@0
  1260
	NKern::Lock();
sl@0
  1261
	TBool r=aThread->Resume();
sl@0
  1262
	aMutex->Signal();
sl@0
  1263
	NKern::Unlock();
sl@0
  1264
	return r;
sl@0
  1265
	}
sl@0
  1266
sl@0
  1267
sl@0
  1268
/** Forces the execution of a thread to be resumed.
sl@0
  1269
sl@0
  1270
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1271
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
sl@0
  1272
sl@0
  1273
    This function cancels all suspensions on a thread.
sl@0
  1274
sl@0
  1275
    @param aThread Thread to be resumed.
sl@0
  1276
    
sl@0
  1277
    @return TRUE, if the thread had changed the state from suspended to non-suspended;
sl@0
  1278
            FALSE, otherwise.
sl@0
  1279
            
sl@0
  1280
	@see Kern::ThreadResume()
sl@0
  1281
*/
sl@0
  1282
EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread)
sl@0
  1283
	{	
sl@0
  1284
	NKern::Lock();
sl@0
  1285
	TBool r=aThread->ForceResume();
sl@0
  1286
	NKern::Unlock();
sl@0
  1287
	return r;
sl@0
  1288
	}
sl@0
  1289
sl@0
  1290
sl@0
  1291
/** Forces the execution of a thread to be resumed and signals a mutex.
sl@0
  1292
sl@0
  1293
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1294
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
sl@0
  1295
sl@0
  1296
    This function cancels all suspensions on a thread.
sl@0
  1297
sl@0
  1298
    @param aThread Thread to be resumed.
sl@0
  1299
    @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
sl@0
  1300
    
sl@0
  1301
    @return TRUE, if the thread had changed the state from suspended to non-suspended;
sl@0
  1302
            FALSE, otherwise.
sl@0
  1303
            
sl@0
  1304
    @see Kern::ThreadResume()
sl@0
  1305
*/
sl@0
  1306
EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex)
sl@0
  1307
	{
sl@0
  1308
	if (!aMutex)
sl@0
  1309
		aMutex=&TheScheduler.iLock;
sl@0
  1310
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadForceResume %T + FM %M",aThread,aMutex));
sl@0
  1311
	NKern::Lock();
sl@0
  1312
	TBool r=aThread->ForceResume();
sl@0
  1313
	aMutex->Signal();
sl@0
  1314
	NKern::Unlock();
sl@0
  1315
	return r;
sl@0
  1316
	}
sl@0
  1317
sl@0
  1318
sl@0
  1319
/** Awakens a nanothread.
sl@0
  1320
sl@0
  1321
	This function is used to implement synchronisation primitives in the EPOC
sl@0
  1322
	kernel (e.g. DMutex and DSemaphore) and in personality layers.  It is not
sl@0
  1323
	intended to be used directly by device drivers.
sl@0
  1324
sl@0
  1325
	If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is
sl@0
  1326
	blocked in a call to NKern::Block, it is awakened and put back on the ready
sl@0
  1327
	list.  Otherwise, the thread state is unchanged.  In particular, nothing
sl@0
  1328
	happens if the nanothread has been explicitly suspended.
sl@0
  1329
sl@0
  1330
	@param aThread Thread to release.
sl@0
  1331
	@param aReturnValue Value returned by NKern::Block if the thread was blocked.
sl@0
  1332
sl@0
  1333
	@see NKern::Block()
sl@0
  1334
sl@0
  1335
	@pre Interrupts must be enabled.
sl@0
  1336
	@pre Do not call from an ISR
sl@0
  1337
 */
sl@0
  1338
EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue)
sl@0
  1339
	{
sl@0
  1340
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)");
sl@0
  1341
	NKern::Lock();
sl@0
  1342
	aThread->Release(aReturnValue);
sl@0
  1343
	NKern::Unlock();
sl@0
  1344
	}
sl@0
  1345
sl@0
  1346
sl@0
  1347
/** Atomically awakens a nanothread and signals a fast mutex.
sl@0
  1348
sl@0
  1349
	This function is used to implement synchronisation primitives in the EPOC
sl@0
  1350
	kernel (e.g. DMutex and DSemaphore) and in personality layers.  It is not
sl@0
  1351
	intended to be used directly by device drivers.
sl@0
  1352
sl@0
  1353
	@param aThread Thread to release.
sl@0
  1354
	@param aReturnValue Value returned by NKern::Block if the thread was blocked.
sl@0
  1355
	@param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
sl@0
  1356
sl@0
  1357
	@see NKern::ThreadRelease(NThread*, TInt)
sl@0
  1358
	@see NKern::Block()
sl@0
  1359
sl@0
  1360
	@pre	Call in a thread context.
sl@0
  1361
	@pre	Interrupts must be enabled.
sl@0
  1362
	@pre	Kernel must be unlocked.
sl@0
  1363
	@pre	Specified mutex must be held
sl@0
  1364
 */
sl@0
  1365
EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex)
sl@0
  1366
	{
sl@0
  1367
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)");
sl@0
  1368
	if (!aMutex)
sl@0
  1369
		aMutex=&TheScheduler.iLock;
sl@0
  1370
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadRelease %T ret %d + FM %M",aThread,aReturnValue,aMutex));
sl@0
  1371
	NKern::Lock();
sl@0
  1372
	aThread->Release(aReturnValue);
sl@0
  1373
	aMutex->Signal();
sl@0
  1374
	NKern::Unlock();
sl@0
  1375
	}
sl@0
  1376
sl@0
  1377
sl@0
  1378
/** Changes the priority of a thread.
sl@0
  1379
sl@0
  1380
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1381
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
sl@0
  1382
sl@0
  1383
    @param aThread Thread to receive the new priority.
sl@0
  1384
    @param aPriority New priority for aThread.
sl@0
  1385
    
sl@0
  1386
	@see Kern::SetThreadPriority()
sl@0
  1387
*/
sl@0
  1388
EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority)
sl@0
  1389
	{
sl@0
  1390
	NKern::Lock();
sl@0
  1391
	aThread->SetPriority(aPriority);
sl@0
  1392
	NKern::Unlock();
sl@0
  1393
	}
sl@0
  1394
sl@0
  1395
sl@0
  1396
/** Changes the priority of a thread and signals a mutex.
sl@0
  1397
sl@0
  1398
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1399
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
sl@0
  1400
sl@0
  1401
    @param aThread Thread to receive the new priority.
sl@0
  1402
    @param aPriority New priority for aThread.
sl@0
  1403
    @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
sl@0
  1404
        
sl@0
  1405
	@see Kern::SetThreadPriority()
sl@0
  1406
*/
sl@0
  1407
EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex)
sl@0
  1408
	{	
sl@0
  1409
	if (!aMutex)
sl@0
  1410
		aMutex=&TheScheduler.iLock;
sl@0
  1411
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadSetPriority %T->%d + FM %M",aThread,aPriority,aMutex));
sl@0
  1412
	NKern::Lock();
sl@0
  1413
	aThread->SetPriority(aPriority);
sl@0
  1414
	aMutex->Signal();
sl@0
  1415
	NKern::Unlock();
sl@0
  1416
	}
sl@0
  1417
sl@0
  1418
#ifndef __SCHEDULER_MACHINE_CODED__
sl@0
  1419
sl@0
  1420
/** Signals the request semaphore of a nanothread.
sl@0
  1421
sl@0
  1422
	This function is intended to be used by the EPOC layer and personality
sl@0
  1423
	layers.  Device drivers should use Kern::RequestComplete instead.
sl@0
  1424
sl@0
  1425
	@param aThread Nanothread to signal. Must be non NULL.
sl@0
  1426
sl@0
  1427
	@see Kern::RequestComplete()
sl@0
  1428
sl@0
  1429
	@pre Interrupts must be enabled.
sl@0
  1430
	@pre Do not call from an ISR
sl@0
  1431
 */
sl@0
  1432
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread)
sl@0
  1433
	{
sl@0
  1434
	CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRequestSignal(NThread*)");
sl@0
  1435
	NKern::Lock();
sl@0
  1436
	aThread->iRequestSemaphore.Signal();
sl@0
  1437
	NKern::Unlock();
sl@0
  1438
	}
sl@0
  1439
sl@0
  1440
sl@0
  1441
/** Atomically signals the request semaphore of a nanothread and a fast mutex.
sl@0
  1442
sl@0
  1443
	This function is intended to be used by the EPOC layer and personality
sl@0
  1444
	layers.  Device drivers should use Kern::RequestComplete instead.
sl@0
  1445
sl@0
  1446
	@param aThread Nanothread to signal.  Must be non NULL.
sl@0
  1447
	@param aMutex Fast mutex to signal.  If NULL, the system lock is signaled.
sl@0
  1448
sl@0
  1449
	@see Kern::RequestComplete()
sl@0
  1450
sl@0
  1451
	@pre	Call in a thread context.
sl@0
  1452
	@pre	Interrupts must be enabled.
sl@0
  1453
	@pre	Kernel must be unlocked.
sl@0
  1454
	@pre	Specified mutex must be held
sl@0
  1455
 */
sl@0
  1456
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex)
sl@0
  1457
	{
sl@0
  1458
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)");
sl@0
  1459
	if (!aMutex)
sl@0
  1460
		aMutex=&TheScheduler.iLock;
sl@0
  1461
	NKern::Lock();
sl@0
  1462
	aThread->iRequestSemaphore.Signal();
sl@0
  1463
	aMutex->Signal();
sl@0
  1464
	NKern::Unlock();
sl@0
  1465
	}
sl@0
  1466
#endif
sl@0
  1467
sl@0
  1468
sl@0
  1469
/** Signals the request semaphore of a nanothread several times.
sl@0
  1470
sl@0
  1471
	This function is intended to be used by the EPOC layer and personality
sl@0
  1472
	layers.  Device drivers should use Kern::RequestComplete instead.
sl@0
  1473
sl@0
  1474
	@param aThread Nanothread to signal.  If NULL, the current thread is signaled.
sl@0
  1475
	@param aCount Number of times the request semaphore must be signaled.
sl@0
  1476
	
sl@0
  1477
	@pre aCount >= 0
sl@0
  1478
sl@0
  1479
	@see Kern::RequestComplete()
sl@0
  1480
 */
sl@0
  1481
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount)
sl@0
  1482
	{
sl@0
  1483
	__ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal");
sl@0
  1484
	if (!aThread)
sl@0
  1485
		aThread=(NThread*)TheScheduler.iCurrentThread;
sl@0
  1486
	NKern::Lock();
sl@0
  1487
	aThread->iRequestSemaphore.SignalN(aCount);
sl@0
  1488
	NKern::Unlock();
sl@0
  1489
	}
sl@0
  1490
sl@0
  1491
sl@0
  1492
/**	Kills a nanothread.
sl@0
  1493
sl@0
  1494
	This function is intended to be used by the EPOC layer and personality layers.
sl@0
  1495
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
sl@0
  1496
sl@0
  1497
	This function does not return if the current thread is killed.  
sl@0
  1498
	This function is asynchronous (i.e. the thread to kill may still be alive when the call returns).
sl@0
  1499
sl@0
  1500
	@param aThread Thread to kill.  Must be non NULL.
sl@0
  1501
sl@0
  1502
	@pre If acting on calling thread, calling thread must not be in a
sl@0
  1503
			critical section
sl@0
  1504
	@pre Thread must not already be exiting.
sl@0
  1505
sl@0
  1506
	@see Kern::ThreadKill()
sl@0
  1507
 */
sl@0
  1508
EXPORT_C void NKern::ThreadKill(NThread* aThread)
sl@0
  1509
	{
sl@0
  1510
	NKern::Lock();
sl@0
  1511
	aThread->Kill();
sl@0
  1512
	NKern::Unlock();
sl@0
  1513
	}
sl@0
  1514
sl@0
  1515
sl@0
  1516
/**	Atomically kills a nanothread and signals a fast mutex.

	This function is intended to be used by the EPOC layer and personality layers.
	Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().

	@param aThread Thread to kill.  Must be non NULL.
	@param aMutex Fast mutex to signal.  If NULL, the system lock is signalled.

	@pre	If acting on calling thread, calling thread must not be in a
			critical section
	@pre Thread must not already be exiting.

	@see NKern::ThreadKill(NThread*)
 */
EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex)
	{
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	if (aThread==pC)
		{
		// Suicide case: mark the exit as pending and let the fast mutex
		// signal carry it out - signalling with iWaiting set runs the CS
		// function (the exit) as part of releasing the mutex.
		__NK_ASSERT_DEBUG(pC->iCsCount==0);	// Make sure thread isn't in critical section
		aThread->iCsFunction=NThreadBase::ECSExitPending;
		aMutex->iWaiting=1;
		aMutex->Signal();	// this will make us exit
		FAULT();			// should never get here
		}
	else
		{
		// Killing another thread: plain kill, then release the mutex.
		aThread->Kill();
		aMutex->Signal();
		}
	NKern::Unlock();
	}
sl@0
  1551
sl@0
  1552
sl@0
  1553
/** Enters thread critical section.
sl@0
  1554
sl@0
  1555
	This function can safely be used in device drivers.
sl@0
  1556
sl@0
  1557
    The current thread will enter its critical section. While in critical section
sl@0
  1558
    the thread cannot be suspended or killed. Any suspension or kill will be deferred
sl@0
  1559
    until the thread leaves the critical section.
sl@0
  1560
    Some API explicitly require threads to be in critical section before calling that
sl@0
  1561
    API.
sl@0
  1562
    Only User threads need to call this function as the concept of thread critical
sl@0
  1563
    section applies to User threads only.
sl@0
  1564
sl@0
  1565
	@pre	Call in a thread context.
sl@0
  1566
	@pre	Kernel must be unlocked.
sl@0
  1567
*/
sl@0
  1568
EXPORT_C void NKern::ThreadEnterCS()
sl@0
  1569
	{
sl@0
  1570
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS");
sl@0
  1571
	NThreadBase* pC=TheScheduler.iCurrentThread;
sl@0
  1572
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadEnterCS %T",pC));
sl@0
  1573
	__NK_ASSERT_DEBUG(pC->iCsCount>=0);
sl@0
  1574
	++pC->iCsCount;
sl@0
  1575
	}
sl@0
  1576
sl@0
  1577
sl@0
  1578
// Internal variant of ThreadEnterCS: no precondition checks or tracing,
// and returns the current thread for the caller's convenience.
NThread* NKern::_ThreadEnterCS()
	{
	NThread* pCurrent = (NThread*)TheScheduler.iCurrentThread;
	__NK_ASSERT_DEBUG(pCurrent->iCsCount>=0);
	++pCurrent->iCsCount;	// nestable; balanced by _ThreadLeaveCS()
	return pCurrent;
	}
sl@0
  1585
sl@0
  1586
sl@0
  1587
/** Leaves thread critical section.

	This function can safely be used in device drivers.

    The current thread will leave its critical section. If the thread was suspended/killed
    while in critical section, the thread will be suspended/killed after leaving the
    critical section by calling this function.
    Only User threads need to call this function as the concept of thread critical
    section applies to User threads only.

	@pre	Call in a thread context.
	@pre	Kernel must be unlocked.
*/

EXPORT_C void NKern::ThreadLeaveCS()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS");
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadLeaveCS %T",pC));
	__NK_ASSERT_DEBUG(pC->iCsCount>0);
	// On leaving the outermost critical section, process any suspend/kill
	// that was deferred while the section was held (iCsFunction != 0).
	if (--pC->iCsCount==0 && pC->iCsFunction!=0)
		{
		if (pC->iHeldFastMutex)
			pC->iHeldFastMutex->iWaiting=1;	// defer further: run it when the fast mutex is signalled
		else
			pC->DoCsFunction();	// apply the deferred suspend/kill now
		}
	NKern::Unlock();
	}
sl@0
  1617
sl@0
  1618
// Internal variant of NKern::ThreadLeaveCS(): identical logic but without
// the precondition check or trace output.
void NKern::_ThreadLeaveCS()
	{
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	__NK_ASSERT_DEBUG(pC->iCsCount>0);
	// On leaving the outermost critical section, run any deferred action.
	if (--pC->iCsCount==0 && pC->iCsFunction!=0)
		{
		if (pC->iHeldFastMutex)
			// Defer again until the held fast mutex is released.
			pC->iHeldFastMutex->iWaiting=1;
		else
			pC->DoCsFunction();
		}
	NKern::Unlock();
	}
sl@0
  1632
sl@0
  1633
/** Freeze the CPU of the current thread
sl@0
  1634
sl@0
  1635
	After this the current thread will not migrate to another processor
sl@0
  1636
sl@0
  1637
	On uniprocessor builds does nothing and returns 0
sl@0
  1638
sl@0
  1639
	@return	A cookie to be passed to NKern::EndFreezeCpu() to allow nesting
sl@0
  1640
*/
sl@0
  1641
EXPORT_C TInt NKern::FreezeCpu()
sl@0
  1642
	{
sl@0
  1643
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu");
sl@0
  1644
	return 0;
sl@0
  1645
	}
sl@0
  1646
sl@0
  1647
sl@0
  1648
/** Unfreeze the current thread's CPU
sl@0
  1649
sl@0
  1650
	After this the current thread will again be eligible to migrate to another processor
sl@0
  1651
sl@0
  1652
	On uniprocessor builds does nothing
sl@0
  1653
sl@0
  1654
	@param	aCookie the value returned by NKern::FreezeCpu()
sl@0
  1655
*/
sl@0
  1656
EXPORT_C void NKern::EndFreezeCpu(TInt /*aCookie*/)
sl@0
  1657
	{
sl@0
  1658
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu");
sl@0
  1659
	}
sl@0
  1660
sl@0
  1661
sl@0
  1662
/** Change the CPU affinity of a thread
sl@0
  1663
sl@0
  1664
	On uniprocessor builds does nothing
sl@0
  1665
sl@0
  1666
	@pre	Call in a thread context.
sl@0
  1667
sl@0
  1668
	@param	The new CPU affinity mask
sl@0
  1669
	@return The old affinity mask
sl@0
  1670
 */
sl@0
  1671
EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread*, TUint32)
sl@0
  1672
	{
sl@0
  1673
	return 0;	// lock to processor 0
sl@0
  1674
	}
sl@0
  1675
sl@0
  1676
sl@0
  1677
/** Modify a thread's timeslice

	@pre	Call in a thread context.

	@param	aThread		The thread whose timeslice is to be changed
	@param	aTimeslice	The new timeslice value (negative disables timeslicing)
 */
EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice)
	{
	NKern::Lock();
	// If the thread's remaining time equals its full timeslice (i.e. it has
	// not consumed any of the current slice), or the new value is negative,
	// restart the remaining time from the new value as well.
	if (aThread->iTimeslice == aThread->iTime || aTimeslice<0)
		aThread->iTime = aTimeslice;
	aThread->iTimeslice = aTimeslice;
	NKern::Unlock();
	}
sl@0
  1691
sl@0
  1692
sl@0
  1693
/** Blocks current nanothread.

	This function is used to implement synchronisation primitives in the EPOC
	layer and in personality layers.  It is not intended to be used directly by
	device drivers.  

	@param aTimeout If greater than 0, the nanothread will be blocked for at most
					aTimeout microseconds.
	@param aMode	Bitmask whose possible values are documented in TBlockMode.  
	@param aMutex	Fast mutex to operate on.  If NULL, the system lock is used.

	@return The value of the blocked thread's iReturnValue when it is released.

	@see NKern::ThreadRelease()
	@see TBlockMode

	@pre	Call in a thread context.
	@pre	Interrupts must be enabled.
	@pre	Kernel must be unlocked.
	@pre	Specified mutex must be held
 */
EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)");
	// NULL means operate on the system lock.
	if (!aMutex)
		aMutex=&TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	pC->iReturnValue=0;
	NKern::Lock();
	// EEnterCS: enter a critical section atomically with blocking.
	if (aMode & EEnterCS)
		++pC->iCsCount;
	// ERelease: release the fast mutex atomically with blocking.
	if (aMode & ERelease)
		{
#ifdef BTRACE_FAST_MUTEX
		BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,aMutex);
#endif
		aMutex->iHoldingThread=NULL;
		TBool w=aMutex->iWaiting;
		aMutex->iWaiting=0;
		pC->iHeldFastMutex=NULL;
		// A CS function was deferred while the mutex was held; run it now
		// if we are not (or no longer) in a critical section.
		if (w && !pC->iCsCount && pC->iCsFunction)
			pC->DoCsFunction();
		}
	RescheduleNeeded();
	// Start the wait timer if a timeout was requested.
	if (aTimeout)
		{
		pC->iTimer.iUserFlags = TRUE;
		pC->iTimer.OneShot(aTimeout,TRUE);
		}
	// Take ourselves off the ready list and mark as blocked; the actual
	// reschedule happens when the kernel lock is released below.
	if (pC->iNState==NThread::EReady)
		TheScheduler.Remove(pC);
	pC->iNState=NThread::EBlocked;
	NKern::Unlock();
	// EClaim: re-acquire the fast mutex after being released from the block.
	if (aMode & EClaim)
		FMWait(aMutex);
	return pC->iReturnValue;
	}
sl@0
  1749
sl@0
  1750
/**
@pre	Call in a thread context.
@pre	Interrupts must be enabled.
@pre	Kernel must be unlocked.
@pre	No fast mutex can be held
*/
/** @see NKern::Block(TUint32, TUint, NFastMutex*) */
EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	pC->iReturnValue=0;
	NKern::Lock();
	// EEnterCS: enter a critical section atomically with blocking.
	if (aMode & EEnterCS)
		++pC->iCsCount;
	RescheduleNeeded();
	// Start the wait timer if a timeout was requested.
	if (aTimeout)
		{
		pC->iTimer.iUserFlags = TRUE;
		pC->iTimer.OneShot(aTimeout,TRUE);
		}
	// Mark blocked and leave the ready list; the reschedule takes effect
	// when the kernel lock is released.
	pC->iNState=NThread::EBlocked;
	TheScheduler.Remove(pC);
	NKern::Unlock();
	return pC->iReturnValue;
	}
sl@0
  1777
sl@0
  1778
sl@0
  1779
sl@0
  1780
sl@0
  1781
EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj)
/**
Places the current nanothread into a wait state on an externally
defined wait object.
	
For use by RTOS personality layers.
Do not use this function directly on a Symbian OS thread.

Since the kernel is locked on entry, any reschedule will be deferred until
it is unlocked. The thread should be added to any necessary wait queue after
a call to this function, since this function removes it from the ready list.
The thread's wait timer is started if aTimeout is nonzero.
The thread's NState and wait object are updated.

Call NThreadBase::Release() when the wait condition is resolved.

@param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks.
                A zero value means wait forever.
                If the thread is still blocked when the timeout expires,
                then the timeout state handler will be called.
@param aState   The nanokernel thread state (N-State) value to be set.
                This state corresponds to the externally defined wait object.
                This value will be written into the member NThreadBase::iNState.
@param aWaitObj A pointer to an externally defined wait object.
                This value will be written into the member NThreadBase::iWaitObj.

@pre	Kernel must be locked.
@pre	Call in a thread context.

@post	Kernel is locked.

@see	NThreadBase::Release()
*/
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock");		
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	// Start the wait timer only for a finite timeout; zero waits forever.
	if (aTimeout)
		{
		pC->iTimer.iUserFlags = TRUE;
		pC->iTimer.OneShot(aTimeout,TRUE);
		}
	// Record the externally defined wait state and object on the thread.
	pC->iNState = (TUint8)aState;
	pC->iWaitObj = aWaitObj;
	pC->iReturnValue = 0;
	// Leave the ready list; reschedule is deferred until the kernel is unlocked.
	TheScheduler.Remove(pC);
	RescheduleNeeded();
	}
sl@0
  1829
sl@0
  1830
sl@0
  1831
sl@0
  1832
sl@0
  1833
EXPORT_C void NKern::Sleep(TUint32 aTime)
/**
Puts the current nanothread to sleep for the specified duration.

It can be called from Symbian OS threads.

@param	aTime sleep time in nanokernel timer ticks.

@pre    No fast mutex can be held.
@pre    Kernel must be unlocked.
@pre	Call in a thread context.
@pre	Interrupts must be enabled.
*/
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep");		
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Sleep %d",aTime));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	// Arm the wake-up timer, mark the thread sleeping and leave the ready
	// list; the reschedule happens when the kernel lock is dropped.
	pC->iTimer.iUserFlags = TRUE;
	pC->iTimer.OneShot(aTime,TRUE);
	pC->iNState=NThread::ESleep;
	TheScheduler.Remove(pC);
	RescheduleNeeded();
	NKern::Unlock();
	}
sl@0
  1858
sl@0
  1859
sl@0
  1860
/**	Terminates the current nanothread.

	Calls to this function never return.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.

	@pre	Call in a thread context.
	@pre	Interrupts must be enabled.
	@pre	Kernel must be unlocked.	
 */
EXPORT_C void NKern::Exit()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Exit"));
	NThreadBase* pC=TheScheduler.iCurrentThread;
	NKern::Lock();
	pC->Exit();			// this won't return
	FAULT();			// reaching here is a fatal kernel error
	}
sl@0
  1880
sl@0
  1881
sl@0
  1882
/**	Terminates the current nanothread at the next possible point.

	If the calling thread is not currently in a critical section and does not
	currently hold a fast mutex, it exits immediately and this function does
	not return. On the other hand if the thread is in a critical section or
	holds a fast mutex the thread continues executing but it will exit as soon
	as it leaves the critical section and/or releases the fast mutex.

	@pre	Call in a thread context.
	@pre	Interrupts must be enabled.
	@pre	Kernel must be unlocked.	
 */
EXPORT_C void NKern::DeferredExit()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit"));
	NFastMutex* m = HeldFastMutex();
	NThreadBase* pC = NKern::LockC();
	// Free to go right now: no fast mutex held and not in a critical section.
	if (!m && !pC->iCsCount)
		pC->Exit();			// this won't return
	// Otherwise record the pending exit as the deferred CS function.
	if (pC->iCsFunction >= 0)	// don't touch it if we are already exiting
		pC->iCsFunction = NThreadBase::ECSExitPending;
	// Holding a fast mutex but not in a CS: flag the mutex so its release
	// path triggers the deferred exit.
	if (m && !pC->iCsCount)
		m->iWaiting = TRUE;
	NKern::Unlock();
	}
sl@0
  1908
sl@0
  1909
sl@0
  1910
/** Prematurely terminates the current thread's timeslice

	@pre	Kernel must be unlocked.
	@pre	Call in a thread context.
	
	@post	Kernel is unlocked.
 */
EXPORT_C void NKern::YieldTimeslice()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice"));
	NThreadBase* t = NKern::LockC();
	t->iTime = 0;		// give up the remainder of the current timeslice
	// Only force a reschedule if the thread is not alone on its ready queue
	// (iNext == t means there is no other thread to round-robin to).
	if (t->iNext != t)
		RescheduleNeeded();
	NKern::Unlock();
	}
sl@0
  1927
sl@0
  1928
sl@0
  1929
/** Rotates the ready list for threads at the specified priority.
sl@0
  1930
	
sl@0
  1931
	For use by RTOS personality layers to allow external control of round-robin
sl@0
  1932
	scheduling. Not intended for direct use by device drivers.
sl@0
  1933
sl@0
  1934
	@param	aPriority = priority at which threads should be rotated.
sl@0
  1935
						-1 means use calling thread's priority.
sl@0
  1936
	
sl@0
  1937
	@pre	Kernel must be unlocked.
sl@0
  1938
	@pre	Call in a thread context.
sl@0
  1939
	
sl@0
  1940
	@post	Kernel is unlocked.
sl@0
  1941
 */
sl@0
  1942
EXPORT_C void NKern::RotateReadyList(TInt aPriority)
sl@0
  1943
	{
sl@0
  1944
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList");		
sl@0
  1945
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority));
sl@0
  1946
	if (aPriority<0 || aPriority>=KNumPriorities)
sl@0
  1947
		aPriority=TheScheduler.iCurrentThread->iPriority;
sl@0
  1948
	NKern::Lock();
sl@0
  1949
	TheScheduler.RotateReadyList(aPriority);
sl@0
  1950
	NKern::Unlock();
sl@0
  1951
	}
sl@0
  1952
sl@0
  1953
/** Rotates the ready list for threads at the specified priority.
sl@0
  1954
	
sl@0
  1955
	For use by RTOS personality layers to allow external control of round-robin
sl@0
  1956
	scheduling. Not intended for direct use by device drivers.
sl@0
  1957
sl@0
  1958
	@param	aPriority = priority at which threads should be rotated.
sl@0
  1959
						-1 means use calling thread's priority.
sl@0
  1960
	@param	aCpu = which CPU's ready list should be rotated
sl@0
  1961
					ignored on UP systems.
sl@0
  1962
	
sl@0
  1963
	@pre	Kernel must be unlocked.
sl@0
  1964
	@pre	Call in a thread context.
sl@0
  1965
	
sl@0
  1966
	@post	Kernel is unlocked.
sl@0
  1967
 */
sl@0
  1968
EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt /*aCpu*/)
sl@0
  1969
	{
sl@0
  1970
	RotateReadyList(aPriority);
sl@0
  1971
	}
sl@0
  1972
sl@0
  1973
sl@0
  1974
/** Returns the NThread control block for the currently scheduled thread.
sl@0
  1975
sl@0
  1976
    Note that this is the calling thread if called from a thread context, or the
sl@0
  1977
	interrupted thread if called from an interrupt context.
sl@0
  1978
	
sl@0
  1979
	@return A pointer to the NThread for the currently scheduled thread.
sl@0
  1980
	
sl@0
  1981
	@pre Call in any context.
sl@0
  1982
*/
sl@0
  1983
EXPORT_C NThread* NKern::CurrentThread()
sl@0
  1984
	{
sl@0
  1985
	return (NThread*)TheScheduler.iCurrentThread;
sl@0
  1986
	}
sl@0
  1987
sl@0
  1988
sl@0
  1989
/** Returns the CPU number of the calling CPU.
sl@0
  1990
sl@0
  1991
	@return the CPU number of the calling CPU.
sl@0
  1992
	
sl@0
  1993
	@pre Call in any context.
sl@0
  1994
*/
sl@0
  1995
EXPORT_C TInt NKern::CurrentCpu()
sl@0
  1996
	{
sl@0
  1997
	return 0;
sl@0
  1998
	}
sl@0
  1999
sl@0
  2000
sl@0
  2001
/** Returns the number of CPUs available to Symbian OS
sl@0
  2002
sl@0
  2003
	@return the number of CPUs
sl@0
  2004
	
sl@0
  2005
	@pre Call in any context.
sl@0
  2006
*/
sl@0
  2007
EXPORT_C TInt NKern::NumberOfCpus()
sl@0
  2008
	{
sl@0
  2009
	return 1;
sl@0
  2010
	}
sl@0
  2011
sl@0
  2012
sl@0
  2013
/** Check if the kernel is locked the specified number of times.
sl@0
  2014
sl@0
  2015
	@param aCount	The number of times the kernel should be locked
sl@0
  2016
					If zero, tests if it is locked at all
sl@0
  2017
	@return TRUE if the tested condition is true.
sl@0
  2018
sl@0
  2019
	@internalTechnology
sl@0
  2020
*/
sl@0
  2021
EXPORT_C TBool NKern::KernelLocked(TInt aCount)
sl@0
  2022
	{
sl@0
  2023
	if (aCount)
sl@0
  2024
		return TheScheduler.iKernCSLocked == aCount;
sl@0
  2025
	return TheScheduler.iKernCSLocked!=0;
sl@0
  2026
	}
sl@0
  2027
sl@0
  2028
sl@0
  2029
/******************************************************************************
sl@0
  2030
 * Priority lists
sl@0
  2031
 ******************************************************************************/
sl@0
  2032
sl@0
  2033
#ifndef __PRI_LIST_MACHINE_CODED__
sl@0
  2034
/** Returns the priority of the highest priority item present on a priority list.
sl@0
  2035
sl@0
  2036
	@return	The highest priority present or -1 if the list is empty.
sl@0
  2037
 */
sl@0
  2038
EXPORT_C TInt TPriListBase::HighestPriority()
sl@0
  2039
	{
sl@0
  2040
//	TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
sl@0
  2041
//	return __e32_find_ms1_64(present);
sl@0
  2042
	return __e32_find_ms1_64(iPresent64);
sl@0
  2043
	}
sl@0
  2044
sl@0
  2045
sl@0
  2046
/** Finds the highest priority item present on a priority list.
sl@0
  2047
sl@0
  2048
	If multiple items at the same priority are present, return the first to be
sl@0
  2049
	added in chronological order.
sl@0
  2050
sl@0
  2051
	@return	A pointer to the item or NULL if the list is empty.
sl@0
  2052
 */
sl@0
  2053
EXPORT_C TPriListLink* TPriListBase::First()
sl@0
  2054
	{
sl@0
  2055
	TInt p = HighestPriority();
sl@0
  2056
	return p >=0 ? static_cast<TPriListLink*>(iQueue[p]) : NULL;
sl@0
  2057
	}
sl@0
  2058
sl@0
  2059
sl@0
  2060
/** Adds an item to a priority list.
sl@0
  2061
sl@0
  2062
	@param aLink A pointer to the item - must not be NULL.
sl@0
  2063
 */
sl@0
  2064
EXPORT_C void TPriListBase::Add(TPriListLink* aLink)
sl@0
  2065
	{
sl@0
  2066
	TInt p = aLink->iPriority;
sl@0
  2067
	SDblQueLink* head = iQueue[p];
sl@0
  2068
	if (head)
sl@0
  2069
		{
sl@0
  2070
		// already some at this priority
sl@0
  2071
		aLink->InsertBefore(head);
sl@0
  2072
		}
sl@0
  2073
	else
sl@0
  2074
		{
sl@0
  2075
		// 'create' new list
sl@0
  2076
		iQueue[p] = aLink;
sl@0
  2077
		aLink->iNext = aLink->iPrev = aLink;
sl@0
  2078
		iPresent[p>>5] |= 1u << (p & 0x1f);
sl@0
  2079
		}
sl@0
  2080
	}
sl@0
  2081
sl@0
  2082
sl@0
  2083
/** Removes an item from a priority list.
sl@0
  2084
sl@0
  2085
	@param aLink A pointer to the item - must not be NULL.
sl@0
  2086
 */
sl@0
  2087
EXPORT_C void TPriListBase::Remove(TPriListLink* aLink)
sl@0
  2088
	{
sl@0
  2089
	if (!aLink->Alone())
sl@0
  2090
		{
sl@0
  2091
		// not the last on this list
sl@0
  2092
		TInt p = aLink->iPriority;
sl@0
  2093
		if (iQueue[p] == aLink)
sl@0
  2094
			iQueue[p] = aLink->iNext;
sl@0
  2095
		aLink->Deque();
sl@0
  2096
		}
sl@0
  2097
	else
sl@0
  2098
		{
sl@0
  2099
		TInt p = aLink->iPriority;
sl@0
  2100
		iQueue[p] = 0;
sl@0
  2101
		iPresent[p>>5] &= ~(1u << (p & 0x1f));
sl@0
  2102
		KILL_LINK(aLink);
sl@0
  2103
		}
sl@0
  2104
	}
sl@0
  2105
sl@0
  2106
sl@0
  2107
/** Changes the priority of an item on a priority list.
sl@0
  2108
sl@0
  2109
	@param	aLink A pointer to the item to act on - must not be NULL.
sl@0
  2110
	@param	aNewPriority A new priority for the item.
sl@0
  2111
 */
sl@0
  2112
EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
sl@0
  2113
	{
sl@0
  2114
	if (aLink->iPriority!=aNewPriority)
sl@0
  2115
		{
sl@0
  2116
		Remove(aLink);
sl@0
  2117
		aLink->iPriority=TUint8(aNewPriority);
sl@0
  2118
		Add(aLink);
sl@0
  2119
		}
sl@0
  2120
	}
sl@0
  2121
#endif
sl@0
  2122
sl@0
  2123
/** Adds an item to a priority list at the head of the queue for its priority.
sl@0
  2124
sl@0
  2125
	@param aLink A pointer to the item - must not be NULL.
sl@0
  2126
 */
sl@0
  2127
EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
sl@0
  2128
	{
sl@0
  2129
	TInt p = aLink->iPriority;
sl@0
  2130
	SDblQueLink* head = iQueue[p];
sl@0
  2131
	iQueue[p] = aLink;
sl@0
  2132
	if (head)
sl@0
  2133
		{
sl@0
  2134
		// already some at this priority
sl@0
  2135
		aLink->InsertBefore(head);
sl@0
  2136
		}
sl@0
  2137
	else
sl@0
  2138
		{
sl@0
  2139
		// 'create' new list
sl@0
  2140
		aLink->iNext = aLink->iPrev = aLink;
sl@0
  2141
		iPresent[p>>5] |= 1u << (p & 0x1f);
sl@0
  2142
		}
sl@0
  2143
	}
sl@0
  2144
sl@0
  2145