os/kernelhwsrv/kernel/eka/nkern/win32/ncsched.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkern\win32\ncsched.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"

#ifdef __EMI_SUPPORT__
extern void EMI_AddTaskSwitchEvent(TAny* aPrevious, TAny* aNext);
extern void EMI_CheckDfcTag(TAny* aNext);
#endif
typedef void (*ProcessHandler)(TAny* aAddressSpace);
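
// TheScheduler.iProcessHandler, when non-null, is called during context switches
// so that the incoming thread's per-process static data can be updated for its
// address space; see the call sites in NThread::WakeUp() and TScheduler::Reschedule().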

static DWORD TlsIndex = TLS_OUT_OF_INDEXES;

static NThreadBase* SelectThread(TScheduler& aS)
//
// Select the next thread to run.
// This is the heart of the rescheduling algorithm.
//
	{
	NThreadBase* t = static_cast<NThreadBase*>(aS.First());
	__NK_ASSERT_DEBUG(t);
#ifdef _DEBUG
	if (t->iHeldFastMutex)
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched init->%T, Holding %M",t,t->iHeldFastMutex));
		}
	else
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched init->%T",t));
		}
#endif
	if (t->iTime == 0 && !t->Alone())
		{
		// round robin
		// get here if thread's timeslice has expired and there is another
		// thread ready at the same priority
		if (t->iHeldFastMutex)
			{
			// round-robin deferred due to fast mutex held
			t->iHeldFastMutex->iWaiting = 1;
			return t;
			}
		t->iTime = t->iTimeslice;		// reset old thread time slice
		t = static_cast<NThreadBase*>(t->iNext);					// next thread
		aS.iQueue[t->iPriority] = t;		// make it first in list
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("RoundRobin->%T",t));
		}
	if (t->iHeldFastMutex)
		{
		if (t->iHeldFastMutex == &aS.iLock)
			{
			// thread holds system lock: use it
			return t;
			}
		if ((t->i_ThrdAttr & KThreadAttImplicitSystemLock) != 0 && aS.iLock.iHoldingThread)
			t->iHeldFastMutex->iWaiting = 1;
		__NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
/*
		Check for an address space change. Not implemented for Win32, but useful as
		documentation of the algorithm.

		if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 && t->iAddressSpace != aS.iAddressSpace)
			t->iHeldFastMutex->iWaiting = 1;
*/
		}
	else if (t->iWaitFastMutex && t->iWaitFastMutex->iHoldingThread)
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched inter->%T, Blocked on %M",t->iWaitFastMutex->iHoldingThread,t->iWaitFastMutex));
		t = t->iWaitFastMutex->iHoldingThread;
		}
	else if (t->i_ThrdAttr & KThreadAttImplicitSystemLock)
		{
		// implicit system lock required
		if (aS.iLock.iHoldingThread)
			{
			// system lock held, switch to that thread
			t = aS.iLock.iHoldingThread;
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched inter->%T (IMP SYS)",t));
			t->iHeldFastMutex->iWaiting = 1;	// aS.iLock.iWaiting = 1;
			return t;
			}
		__NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
/*
		Check for an address space change. Not implemented for Win32, but useful as
		documentation of the algorithm.

		if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 || t->iAddressSpace != aS.iAddressSpace)
			{
			// what do we do now?
			__NK_ASSERT_DEBUG(FALSE);
			}
*/
		}
	return t;
	}
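
// To summarise: SelectThread() returns either the highest priority ready thread,
// or - so that fast mutex critical sections stay short - the thread currently
// holding the fast mutex or system lock that the preferred candidate needs. In the
// latter cases the mutex's iWaiting flag is set so that the holder reschedules
// promptly on release; this amounts to a limited form of priority inheritance
// for fast mutexes.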

// from NThread
#undef i_ThrdAttr

TBool NThread::WakeUp()
//
// Wake up the thread. What to do depends on whether we were preempted or voluntarily
// rescheduled.
//
// Return TRUE if we need to immediately reschedule again because we had to unlock
// the kernel but there are DFCs pending. In this case, the thread does not wake up.
//
// NB. kernel is locked
//
	{
	switch (iWakeup)
		{
	default:
		FAULT();
	case EIdle:
		__NK_ASSERT_ALWAYS(TheScheduler.iCurrentThread == this);
		__NK_ASSERT_ALWAYS(SetEvent(iScheduleLock));
		break;
	case ERelease:
		TheScheduler.iCurrentThread = this;
		__NK_ASSERT_ALWAYS(SetEvent(iScheduleLock));
		break;
	case EResumeLocked:
		// The thread is Win32 suspended and must be resumed.
		//
		// A newly created thread does not need the kernel unlocked so we can
		// just resume the suspended thread
		//
		__KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T",this));
		iWakeup = ERelease;
		TheScheduler.iCurrentThread = this;
		if (TheScheduler.iProcessHandler)
			(*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // new thread will need to have its static data updated
		__NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) > 0);	// check thread was previously suspended
		break;
	case EResumeDiverted:
		// The thread is Win32 suspended and must be resumed.
		//
		// The thread needs to be diverted, and does not need the kernel
		// unlocked.
		//
		// It's safe to divert the thread here because we called
		// IsSafeToPreempt() when we suspended it - otherwise the diversion
		// could get lost.
		//
		__KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T (Resuming diverted thread)",this));
		iWakeup = ERelease;
		ApplyDiversion();
		TheScheduler.iCurrentThread = this;
		__NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) == 1);
		break;
	case EResume:
		// The thread is Win32 suspended and must be resumed.
		//
		// the complication here is that we have to unlock the kernel on behalf of the
		// pre-empted thread. This means that we have to check to see if there are more DFCs
		// pending or a reschedule required, as we unlock the kernel. That check is
		// carried out with interrupts disabled.
		//
		// If so, we go back around the loop in this thread context
		//
		// Otherwise, we unlock the kernel (having marked us as not-preempted),
		// enable interrupts and then resume the thread. If pre-emption occurs before the thread
		// is resumed, it is the new thread that is pre-empted, not the running thread, so we are guaranteed
		// to be able to call ResumeThread. If pre-emption occurs, and we are rescheduled to run before
		// that occurs, we will once again be running with the kernel locked and the other thread will
		// have been re-suspended by Win32: so all is well.
		//
		{
		__KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T",this));
		TInt irq = NKern::DisableAllInterrupts();
		if (TheScheduler.iDfcPendingFlag || TheScheduler.iRescheduleNeededFlag)
			{
			// we were interrupted... back to the top
			TheScheduler.iRescheduleNeededFlag = TRUE;	// ensure we do the reschedule
			return TRUE;
			}
		iWakeup = ERelease;
		TheScheduler.iCurrentThread = this;
		if (TheScheduler.iProcessHandler)
			(*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // threads resumed after interrupt or locks need to have static data updated

		if (iInKernel == 0 && iUserModeCallbacks != NULL)
			ApplyDiversion();
		else
			TheScheduler.iKernCSLocked = 0;		// have to unlock the kernel on behalf of the new thread

		TheScheduler.iCurrentThread = this;
		NKern::RestoreInterrupts(irq);
		__NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) > 0);	// check thread was previously suspended
		}
		break;
		}
	return FALSE;
	}

static void ThreadExit(NThread& aCurrent, NThread& aNext)
//
// The final context switch of a thread.
// Wake up the next thread and then destroy this one's Win32 resources.
//
// Return without terminating if we need to immediately reschedule again because
// we had to unlock the kernel but there are DFCs pending.
//
	{
	// the thread is dead
	// extract win32 handles from dying NThread object before rescheduling
	HANDLE sl = aCurrent.iScheduleLock;
	HANDLE th = aCurrent.iWinThread;

	// wake up the next thread
	if (aNext.WakeUp())
		return;			// need to re-reschedule in this thread

	// we are now a vanilla win32 thread, nKern no longer knows about us
	// release resources and exit cleanly
	CloseHandle(sl);
	CloseHandle(th);
	ExitThread(0);		// does not return
	}

#ifdef MONITOR_THREAD_CPU_TIME
static inline void UpdateThreadCpuTime(NThread& aCurrent, NThread& aNext)
	{
	TUint32 timestamp = NKern::FastCounter();
	if (aCurrent.iLastStartTime)
		aCurrent.iTotalCpuTime += timestamp - aCurrent.iLastStartTime;
	aNext.iLastStartTime = timestamp;
	}
#else
static inline void UpdateThreadCpuTime(NThread& /*aCurrent*/, NThread& /*aNext*/)
	{
	}
#endif
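
// Note that the accounting above relies on unsigned 32-bit arithmetic: the delta
// (timestamp - iLastStartTime) remains correct even if NKern::FastCounter() wraps
// once between the two samples.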

static void SwitchThreads(NThread& aCurrent, NThread& aNext)
//
// The fundamental context switch - wake up the next thread and wait to be
// rescheduled. The trivial implementation is aNext.WakeUp() followed by
// Wait(aCurrent.iScheduleLock), but we may be able to optimise the
// signal-and-wait into a single atomic operation.
//
	{
	UpdateThreadCpuTime(aCurrent, aNext);
	if (aCurrent.iNState == NThread::EDead)
		ThreadExit(aCurrent, aNext);
	else if (Win32AtomicSOAW && aNext.iWakeup==NThread::ERelease)
		{
		// special case optimization for normally blocked threads using atomic Win32 primitive
		TheScheduler.iCurrentThread = &aNext;
		DWORD result=SignalObjectAndWait(aNext.iScheduleLock,aCurrent.iScheduleLock, INFINITE, FALSE);
		if (result != WAIT_OBJECT_0)
			{
			__NK_ASSERT_ALWAYS(result == 0xFFFFFFFF);
			KPrintf("SignalObjectAndWait() failed with %d (%T->%T)",GetLastError(),&aCurrent,&aNext);
			FAULT();
			}
		}
	else
		{
		if (aNext.WakeUp())
			return;			// need to re-reschedule in this thread
		__NK_ASSERT_ALWAYS(WaitForSingleObject(aCurrent.iScheduleLock, INFINITE) == WAIT_OBJECT_0);
		}
	}
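
// The SignalObjectAndWait() path above is more than a speed tweak: it combines the
// SetEvent()/WaitForSingleObject() pair into a single atomic call, saving a
// user/kernel round trip. It is only taken when the next thread is in the plain
// ERelease state, where WakeUp() would just set iCurrentThread and signal the
// event - exactly what this path does itself.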

void TScheduler::YieldTo(NThreadBase*)
//
// Directed context switch to the nominated thread.
// Enter with kernel locked, exit with kernel unlocked but interrupts disabled.
//
	{
	RescheduleNeeded();
	TScheduler::Reschedule();
	}

void TScheduler::Reschedule()
//
// Enter with kernel locked, exit with kernel unlocked, interrupts disabled.
// If the thread is dead do not return, but terminate the thread.
//
	{
	__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
	NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
	for (;;)
		{
		NKern::DisableAllInterrupts();
		if (TheScheduler.iDfcPendingFlag)
			TheScheduler.QueueDfcs();
		if (!TheScheduler.iRescheduleNeededFlag)
			break;
		NKern::EnableAllInterrupts();
		TheScheduler.iRescheduleNeededFlag = FALSE;
		NThread* t = static_cast<NThread*>(SelectThread(TheScheduler));
		__KTRACE_OPT(KSCHED,DEBUGPRINT("Reschedule->%T (%08x%08x)",t,TheScheduler.iPresent[1],TheScheduler.iPresent[0]));
#ifdef __EMI_SUPPORT__
		EMI_AddTaskSwitchEvent(&me,t);
		EMI_CheckDfcTag(t);
#endif
#ifdef BTRACE_CPU_USAGE
		if(TheScheduler.iCpuUsageFilter)
			TheScheduler.iBTraceHandler(BTRACE_HEADER_C(4,BTrace::ECpuUsage,BTrace::ENewThreadContext),0,(TUint32)t,0,0,0,0,0);
#endif
		SwitchThreads(me, *t);

		// we have just been scheduled to run... check for diversion/new Dfcs
		NThread::TDivert divert = me.iDivert;
		if (divert)
			{
			// diversion (e.g. force exit)
			me.iDivert = NULL;
			divert();						// does not return
			}
		}
	if (TheScheduler.iProcessHandler)
		(*ProcessHandler(TheScheduler.iProcessHandler))(me.iAddressSpace);
	// interrupts are disabled, the kernel is still locked
	TheScheduler.iKernCSLocked = 0;
	}

/**	Put the emulator into 'idle'.
	This is called by the idle thread when there is nothing else to do.

	@internalTechnology
 */
EXPORT_C void NThread::Idle()
//
// Rather than spin, we go to sleep on the schedule lock. Preemption detects
// this state (Win32Idling) and pokes the event rather than diverting the thread.
//
// enter and exit with kernel locked
//
	{
	NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
	me.iWakeup = EIdle;
	__NK_ASSERT_ALWAYS(WaitForSingleObject(me.iScheduleLock, INFINITE) == WAIT_OBJECT_0);
	// something happened, and we've been prodded by an interrupt
	// the kernel was locked by the interrupt, and now reschedule
	me.iWakeup = ERelease;
	TScheduler::Reschedule();
	NKern::EnableAllInterrupts();
	}

void SchedulerInit(NThread& aInit)
//
// Initialise the win32 nKern scheduler
//
	{
	DWORD procaffin,sysaffin;
	if (GetProcessAffinityMask(GetCurrentProcess(),&procaffin,&sysaffin))
		{
		DWORD cpu;
		switch (Win32SingleCpu)
			{
		default:
			// bind the emulator to a nominated CPU on the host PC
			cpu = (1<<Win32SingleCpu);
			if (!(sysaffin & cpu))
				cpu = procaffin;	// CPU selection invalid
			break;
		case NThread::ECpuSingle:
			// bind the emulator to a single CPU on the host PC, pick one
			cpu = procaffin ^ (procaffin & (procaffin-1));
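			// (procaffin & (procaffin-1)) clears the lowest set bit of the process
			// affinity mask, so the XOR isolates that bit: the emulator binds to
			// the lowest-numbered CPU it is permitted to run on.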
			break;
		case NThread::ECpuAll:
			// run the emulator on all CPUs on the host PC
			cpu=sysaffin;
			break;
			}
		SetProcessAffinityMask(GetCurrentProcess(), cpu);
		}
	// identify if we can use the atomic SignalObjectAndWait API in Win32 for rescheduling
	Win32AtomicSOAW = (SignalObjectAndWait(aInit.iScheduleLock, aInit.iScheduleLock, INFINITE, FALSE) == WAIT_OBJECT_0);
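	// The probe above signals and waits on the same event: if the signal-and-wait
	// really is one atomic operation, the wait is satisfied by the signal just set
	// and returns WAIT_OBJECT_0; where SignalObjectAndWait() is unavailable the
	// call fails instead, and SwitchThreads() falls back to the separate
	// SetEvent()/WaitForSingleObject() sequence.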
	//
	// allocate the TLS used for thread identification, and set it for the init thread
	TlsIndex = TlsAlloc();
	__NK_ASSERT_ALWAYS(TlsIndex != TLS_OUT_OF_INDEXES);
	SchedulerRegister(aInit);
	//
	Interrupt.Init();

	Win32FindNonPreemptibleFunctions();
	}

void SchedulerRegister(NThread& aSelf)
	{
	TlsSetValue(TlsIndex,&aSelf);
	}

NThread* SchedulerThread()
	{
	if (TlsIndex != TLS_OUT_OF_INDEXES)
		return static_cast<NThread*>(TlsGetValue(TlsIndex));
	else
		return NULL;  // not yet initialised
	}

inline TBool IsScheduledThread()
	{
	return SchedulerThread() == TheScheduler.iCurrentThread;
	}

NThread& CheckedCurrentThread()
	{
	NThread* t = SchedulerThread();
	__NK_ASSERT_ALWAYS(t == TheScheduler.iCurrentThread);
	return *t;
	}


/**	Disable normal 'interrupts'.

	@param	aLevel Ignored
	@return	Cookie to be passed into RestoreInterrupts()
 */
EXPORT_C TInt NKern::DisableInterrupts(TInt /*aLevel*/)
	{
	return Interrupt.Mask();
	}


/**	Disable all maskable 'interrupts'.

	@return	Cookie to be passed into RestoreInterrupts()
 */
EXPORT_C TInt NKern::DisableAllInterrupts()
	{
	return Interrupt.Mask();
	}
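
// The emulator has a single 'interrupt' level, so DisableInterrupts() and
// DisableAllInterrupts() are equivalent: both simply take the interrupt mask.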


/**	Enable all maskable 'interrupts'

	@internalComponent
 */
EXPORT_C void NKern::EnableAllInterrupts()
	{
	Interrupt.Restore(0);
	}


/** Restore interrupt mask to state preceding a DisableInterrupts() call

	@param	aLevel Cookie returned by Disable(All)Interrupts()
 */
EXPORT_C void NKern::RestoreInterrupts(TInt aLevel)
	{
	Interrupt.Restore(aLevel);
	}


/**	Unlocks the kernel.

	Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
	pending, calls the scheduler to process them.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
	@pre	Do not call from bare Win32 threads.
 */
EXPORT_C void NKern::Unlock()
//
// using this coding sequence it is possible to call Reschedule unnecessarily
// if we are preempted after testing the flags (lock is zero at this point).
// However, in the common case this is much faster because 'disabling interrupts'
// can be very expensive.
//
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Unlock");
	__ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Unlock");	// check that we are a scheduled thread
	__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0);	// Can't unlock if it isn't locked!
	if (--TheScheduler.iKernCSLocked == 0)
		{
		if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
			{
			TheScheduler.iKernCSLocked = 1;
			TScheduler::Reschedule();
			NKern::EnableAllInterrupts();
			}
		}
	}


/**	Locks the kernel.

	Increments iKernCSLocked, thereby deferring IDFCs and preemption.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
	@pre	Do not call from bare Win32 threads.
 */
EXPORT_C void NKern::Lock()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock");
	__ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock");	// check that we are a scheduled thread
	++TheScheduler.iKernCSLocked;
	}


/**	Locks the kernel and returns a pointer to the current thread.

	Increments iKernCSLocked, thereby deferring IDFCs and preemption.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
	@pre	Do not call from bare Win32 threads.
 */
EXPORT_C NThread* NKern::LockC()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock");
	__ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock");	// check that we are a scheduled thread
	++TheScheduler.iKernCSLocked;
	return (NThread*)TheScheduler.iCurrentThread;
	}


/**	Allows IDFCs and rescheduling if they are pending.

	If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1,
	calls the scheduler to process the IDFCs and possibly reschedule.

	@return	Nonzero if a reschedule actually occurred, zero if not.

    @pre    Call either in a thread or an IDFC context.
    @pre    Do not call from an ISR.
	@pre	Do not call from bare Win32 threads.
 */
EXPORT_C TInt NKern::PreemptionPoint()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::PreemptionPoint");
	__ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::PreemptionPoint");	// check that we are a scheduled thread
	if (TheScheduler.iKernCSLocked == 1 &&
		(TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag))
		{
		TScheduler::Reschedule();
		TheScheduler.iKernCSLocked = 1;
		NKern::EnableAllInterrupts();
		return TRUE;
		}
	return FALSE;
	}


/**	Mark the start of an 'interrupt' in the Win32 emulator.
	This must be called in interrupt threads before using any other kernel APIs,
	and should be paired with a call to EndOfInterrupt().

	@pre	Win32 'interrupt' thread context
 */
EXPORT_C void StartOfInterrupt()
	{
	__ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(),"Win32 'interrupt' thread context","StartOfInterrupt");	// check that we are not a scheduled thread
	Interrupt.Begin();
	}


/**	Mark the end of an 'interrupt' in the Win32 emulator.
	This checks to see if we need to reschedule.

	@pre	Win32 'interrupt' thread context
 */
EXPORT_C void EndOfInterrupt()
	{
	__ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(),"Win32 'interrupt' thread context","EndOfInterrupt");	// check that we are not a scheduled thread
	Interrupt.End();
	}


void Win32Interrupt::Init()
	{
	iQ=CreateSemaphoreA(NULL, 0, KMaxTInt, NULL);
	__NK_ASSERT_ALWAYS(iQ);
	//
	// create the NThread which exists solely to service reschedules for interrupts
	// this makes the End() much simpler as it merely needs to kick this thread
	SNThreadCreateInfo ni;
	memclr(&ni, sizeof(ni));
	ni.iFunction=&Reschedule;
	ni.iTimeslice=-1;
	ni.iPriority=1;
	NKern::ThreadCreate(&iScheduler, ni);
	NKern::Lock();
	TScheduler::YieldTo(&iScheduler);
	Restore(0);
	}

TInt Win32Interrupt::Mask()
	{
	if (!iQ)
		return 0;				// interrupt scheme not enabled yet
	DWORD id=GetCurrentThreadId();
	if (__e32_atomic_add_ord32(&iLock, 1))
		{
		if (id==iOwner)
			return iLevel++;
		__NK_ASSERT_ALWAYS(WaitForSingleObject(iQ,INFINITE) == WAIT_OBJECT_0);
		iRescheduleOnExit=IsScheduledThread() &&
				(TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag);
		}
	else
		iRescheduleOnExit=FALSE;
	__NK_ASSERT_ALWAYS(iOwner==0 && iLevel==0);
	iOwner=id;
	iLevel=1;
	return 0;
	}
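
// Mask()/Restore() implement the global 'interrupt mask' as a counted lock: iLock
// atomically counts outstanding Mask() calls, the first caller becomes iOwner and
// receives cookie 0, nested calls by the owner receive increasing levels, and any
// other thread queues on the semaphore iQ. Restore(0) by the owner hands the lock
// to one queued waiter (or fully releases it), rescheduling first if required.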

void Win32Interrupt::Restore(TInt aLevel)
	{
	if (!iQ)
		return;				// interrupt scheme not enabled yet
	DWORD id=GetCurrentThreadId();
	for (;;)
		{
		__NK_ASSERT_ALWAYS(id == iOwner);
		TInt count = iLevel - aLevel;
		if (count <= 0)
			return;						// already restored to that level
		TBool reschedule = FALSE;
		iLevel = aLevel;		// update this value before releasing the lock
		if (aLevel == 0)
			{
			// we release the lock
			iOwner = 0;
			if (iRescheduleOnExit && TheScheduler.iKernCSLocked == 0)
				reschedule = TRUE;		// need to trigger reschedule on full release
			}
		// now release the lock
		if (__e32_atomic_add_ord32(&iLock, TUint32(-count)) == (TUint32)count)
			{	// fully released, check for reschedule
			if (!reschedule)
				return;
			}
		else
			{	// not fully released
			if (aLevel == 0)
				__NK_ASSERT_ALWAYS(ReleaseSemaphore(iQ,1,NULL));
			return;
			}
		// unlocked everything but a reschedule may be required
		TheScheduler.iKernCSLocked = 1;
		TScheduler::Reschedule();
		// return with the kernel unlocked, but interrupts disabled
		// instead of going recursive with a call to EnableAllInterrupts() we iterate
		aLevel=0;
		}
	}

void Win32Interrupt::Begin()
	{
	Mask();
	__NK_ASSERT_ALWAYS(iInterrupted==0);	// check we haven't done this already
	__NK_ASSERT_ALWAYS(!IsScheduledThread());	// check that we aren't a scheduled thread
	NThread* pC;
	for (;;)
		{
		pC=static_cast<NThread*>(TheScheduler.iCurrentThread);
		DWORD r=SuspendThread(pC->iWinThread);
		if (pC == TheScheduler.iCurrentThread)
			{
			// there was no race while suspending the thread, so we can carry on
			__NK_ASSERT_ALWAYS(r != 0xffffffff);
			break;
			}
		// We suspended the thread while doing a context switch, resume it and try again
		if (r != 0xffffffff)
			__NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0);	// check thread was previously suspended
		}
#ifdef BTRACE_CPU_USAGE
	BTrace0(BTrace::ECpuUsage,BTrace::EIrqStart);
#endif
	iInterrupted = pC;
	}

void Win32Interrupt::End()
	{
	__NK_ASSERT_ALWAYS(iOwner == GetCurrentThreadId());	// check we are the interrupting thread
	NThread* pC = iInterrupted;
	__NK_ASSERT_ALWAYS(pC==TheScheduler.iCurrentThread);
	iInterrupted = 0;
	if (iLock == 1 && TheScheduler.iKernCSLocked == 0 &&
		(TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) &&
		pC->IsSafeToPreempt())
		{
		TheScheduler.iKernCSLocked = 1;		// prevent further pre-emption
		if (pC->iWakeup == NThread::EIdle)
			{
			// wake up the NULL thread, it will always reschedule immediately
			pC->WakeUp();
			}
		else
			{
			// pre-empt the current thread and poke the 'scheduler' thread
			__NK_ASSERT_ALWAYS(pC->iWakeup == NThread::ERelease);
			pC->iWakeup = NThread::EResume;
			UpdateThreadCpuTime(*pC, iScheduler);
			RescheduleNeeded();
			NKern::EnableAllInterrupts();
			iScheduler.WakeUp();
			return;
			}
		}
	else
		{
		// no thread reschedule, so emit trace...
#ifdef BTRACE_CPU_USAGE
		BTrace0(BTrace::ECpuUsage,BTrace::EIrqEnd);
#endif
		}

	if (((NThread*)pC)->iInKernel == 0 &&		// thread is running in user mode
		pC->iUserModeCallbacks != NULL && 		// and has callbacks queued
		TheScheduler.iKernCSLocked == 0 &&		// and is not currently processing a diversion
		pC->IsSafeToPreempt())					// and can be safely preempted at this point
		{
		TheScheduler.iKernCSLocked = 1;
		pC->ApplyDiversion();
		}
	NKern::EnableAllInterrupts();
	__NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0);	// check thread was previously suspended
	}

void Win32Interrupt::Reschedule(TAny*)
//
// The entry-point for the interrupt-rescheduler thread.
//
// This spends its whole life going around the TScheduler::Reschedule() loop
// selecting another thread to run.
//
	{
	TheScheduler.iKernCSLocked = 1;
	RescheduleNeeded();
	TScheduler::Reschedule();
	FAULT();
	}

void Win32Interrupt::ForceReschedule()
	{
	RescheduleNeeded();
	iScheduler.WakeUp();
	}

void SchedulerEscape()
	{
	NThread& me=CheckedCurrentThread();
	EnterKernel();
	__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0);	// Can't call Escape() with the Emulator/kernel already locked
	NKern::ThreadEnterCS();
	NKern::Lock();
	me.iNState=NThreadBase::EBlocked;
	TheScheduler.Remove(&me);
	me.iWakeup=NThread::EEscaped;
	SetThreadPriority(me.iWinThread,THREAD_PRIORITY_ABOVE_NORMAL);
	Interrupt.ForceReschedule();	// schedules some other thread so we can carry on outside the scheduler domain
	// this will change the value of iCurrentThread to ensure the 'escaped' invariants are set
	}

void ReenterDfc(TAny* aPtr)
	{
	NThread& me = *static_cast<NThread*>(aPtr);
	me.iWakeup = NThread::ERelease;
	me.CheckSuspendThenReady();
	}

void SchedulerReenter()
	{
	NThread* me=SchedulerThread();
	__NK_ASSERT_ALWAYS(me);
	__NK_ASSERT_ALWAYS(me->iWakeup == NThread::EEscaped);
	TDfc idfc(&ReenterDfc, me);
	StartOfInterrupt();
	idfc.Add();
	EndOfInterrupt();
	SetThreadPriority(me->iWinThread,THREAD_PRIORITY_NORMAL);
	__NK_ASSERT_ALWAYS(WaitForSingleObject(me->iScheduleLock, INFINITE) == WAIT_OBJECT_0);
	// when released, the kernel is locked and handed over to us
	// need to complete the reschedule protocol in this thread now
	TScheduler::Reschedule();
	NKern::EnableAllInterrupts();
	NKern::ThreadLeaveCS();
	LeaveKernel();
	}
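
// SchedulerEscape()/SchedulerReenter() bracket code which must block on native
// Win32 objects: Escape removes the thread from the ready list so that the nkern
// scheduler ignoreses it, and Reenter uses a fake 'interrupt' to queue an IDFC that
// makes the thread ready again, then waits to be handed the locked kernel so it
// can complete the reschedule protocol itself.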


/**	Return the current processor context type
	(thread, IDFC, interrupt or escaped thread)

	@return	A value from NKern::TContext enumeration (including EEscaped)
	@pre	Any context

	@see	NKern::TContext
 */
EXPORT_C TInt NKern::CurrentContext()
	{
	NThread* t = SchedulerThread();
	if (!t)
		return NKern::EInterrupt;
	if (TheScheduler.iInIDFC)
		return NKern::EIDFC;
	if (t->iWakeup == NThread::EEscaped)
		return NKern::EEscaped;
	__NK_ASSERT_ALWAYS(NKern::Crashed() || t == TheScheduler.iCurrentThread);
	return NKern::EThread;
	}

//
// We use SuspendThread and ResumeThread to preempt threads.  This can cause
// deadlock if the thread is using Windows synchronisation primitives (e.g.
// critical sections).  This isn't too much of a problem most of the time,
// because threads generally use the Symbian environment rather than the native
// Windows APIs.  However exceptions are an issue - they can happen at any time,
// and cause execution of native Windows code over which we have no control.
//
// To work around this we examine the call stack to see if the thread is inside
// one of the Windows exception handling functions.  If so, preemption is
// deferred.
//

#include <winnt.h>

const TInt KWin32NonPreemptibleFunctionCount = 2;

struct TWin32FunctionInfo
	{
	TUint iStartAddr;
	TUint iLength;
	};

static TWin32FunctionInfo Win32NonPreemptibleFunctions[KWin32NonPreemptibleFunctionCount];

TWin32FunctionInfo Win32FindExportedFunction(const char* aModuleName, const char* aFunctionName)
	{
	HMODULE library = GetModuleHandleA(aModuleName);
	__NK_ASSERT_ALWAYS(library != NULL);

	// Find the start address of the function
	TUint start = (TUint)GetProcAddress(library, aFunctionName);
	__NK_ASSERT_ALWAYS(start);

	// Now have to check all other exports to find the end of the function
	TUint end = 0xffffffff;
	TInt i = 1;
	for (;;)
		{
		TUint addr = (TUint)GetProcAddress(library, MAKEINTRESOURCEA(i));
		if (!addr)
			break;
		if (addr > start && addr < end)
			end = addr;
		++i;
		}
	__NK_ASSERT_ALWAYS(end != 0xffffffff);

	TWin32FunctionInfo result = { start, end - start };
	return result;
	}
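
// The end of the function is found by walking the module's export table by
// ordinal and taking the lowest export address above the function's start. This
// is an approximation - it assumes no non-exported code lies between the function
// and the next export - but it holds well enough for the two functions probed in
// Win32FindNonPreemptibleFunctions() below.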

void Win32FindNonPreemptibleFunctions()
	{
	Win32NonPreemptibleFunctions[0] = Win32FindExportedFunction("kernel32.dll", "RaiseException");
	Win32NonPreemptibleFunctions[1] = Win32FindExportedFunction("ntdll.dll", "KiUserExceptionDispatcher");
	}

TBool Win32IsThreadInNonPreemptibleFunction(HANDLE aWinThread, TLinAddr aStackTop)
	{
	const TInt KMaxSearchDepth = 16;		 // 12 max observed while handling exceptions
	const TInt KMaxStackSize = 1024 * 1024;  // Default reserved stack size on Windows
	const TInt KMaxFrameSize = 4096;

	CONTEXT c;
	c.ContextFlags=CONTEXT_FULL;
	GetThreadContext(aWinThread, &c);

	TUint eip = c.Eip;
	TUint ebp = c.Ebp;
	TUint lastEbp = c.Esp;
   894 
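	// In a standard x86 stack frame, [ebp] holds the caller's saved EBP and
	// [ebp+4] the return address - this is what frame[0] and frame[1] read in
	// the loop below. The walk therefore assumes EBP chaining (no frame-pointer
	// omission) along the path of interest, which in practice holds for the
	// Windows exception-dispatch functions this search targets.
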
	// Walk the call stack
	for (TInt i = 0 ; i < KMaxSearchDepth ; ++i)
		{
		for (TInt j = 0 ; j < KWin32NonPreemptibleFunctionCount ; ++j)
			{
			const TWin32FunctionInfo& info = Win32NonPreemptibleFunctions[j];
			if (TUint(eip - info.iStartAddr) < info.iLength)
				{
				__KTRACE_OPT(KSCHED, DEBUGPRINT("Thread is in non-preemptible function %d at frame %d: eip == %08x", j, i, eip));
				return TRUE;
				}
			}

		// Check frame pointer is valid before dereferencing it
		if (TUint(aStackTop - ebp) > KMaxStackSize || TUint(ebp - lastEbp) > KMaxFrameSize || ebp & 3)
			break;

		TUint* frame = (TUint*)ebp;
		lastEbp = ebp;
		ebp = frame[0];
		eip = frame[1];
		}

	return FALSE;
	}

TBool NThread::IsSafeToPreempt()
	{
	return !Win32IsThreadInNonPreemptibleFunction(iWinThread, iUserStackBase);
	}

void LeaveKernel()
	{
	TInt& k=CheckedCurrentThread().iInKernel;
	__NK_ASSERT_DEBUG(k>0);
	if (k==1)  // just about to leave kernel
		{
		NThread& t = CheckedCurrentThread();
		__NK_ASSERT_ALWAYS(t.iCsCount==0);
		__NK_ASSERT_ALWAYS(t.iHeldFastMutex==0);
		__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0);
		NKern::DisableAllInterrupts();
		t.CallUserModeCallbacks();
		NKern::EnableAllInterrupts();
		}
	--k;
	}