Update contrib.
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\nkern\win32\ncsched.cpp
// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__
#include <e32cmn_private.h>
#ifdef __EMI_SUPPORT__
extern void EMI_AddTaskSwitchEvent(TAny* aPrevious, TAny* aNext);
extern void EMI_CheckDfcTag(TAny* aNext);
typedef void (*ProcessHandler)(TAny* aAddressSpace);
static DWORD TlsIndex = TLS_OUT_OF_INDEXES;
static NThreadBase* SelectThread(TScheduler& aS)
// Select the next thread to run.
// This is the heart of the rescheduling algorithm.
NThreadBase* t = static_cast<NThreadBase*>(aS.First());
if (t->iHeldFastMutex)
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched init->%T, Holding %M",t,t->iHeldFastMutex));
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched init->%T",t));
if (t->iTime == 0 && !t->Alone())
// get here if thread's timeslice has expired and there is another
// thread ready at the same priority
if (t->iHeldFastMutex)
// round-robin deferred due to fast mutex held
t->iHeldFastMutex->iWaiting = 1;
t->iTime = t->iTimeslice; // reset old thread time slice
t = static_cast<NThreadBase*>(t->iNext); // next thread
aS.iQueue[t->iPriority] = t; // make it first in list
__KTRACE_OPT(KSCHED2,DEBUGPRINT("RoundRobin->%T",t));
if (t->iHeldFastMutex)
if (t->iHeldFastMutex == &aS.iLock)
// thread holds system lock: use it
if ((t->i_ThrdAttr & KThreadAttImplicitSystemLock) != 0 && aS.iLock.iHoldingThread)
t->iHeldFastMutex->iWaiting = 1;
__NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
Check for an address space change. Not implemented for Win32, but useful as
documentation of the algorithm.
if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 && t->iAddressSpace != aS.iAddressSpace)
t->iHeldFastMutex->iWaiting = 1;
else if (t->iWaitFastMutex && t->iWaitFastMutex->iHoldingThread)
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched inter->%T, Blocked on %M",t->iWaitFastMutex->iHoldingThread,t->iWaitFastMutex));
t = t->iWaitFastMutex->iHoldingThread;
else if (t->i_ThrdAttr & KThreadAttImplicitSystemLock)
// implicit system lock required
if (aS.iLock.iHoldingThread)
// system lock held, switch to that thread
t = aS.iLock.iHoldingThread;
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched inter->%T (IMP SYS)",t));
t->iHeldFastMutex->iWaiting = 1; // aS.iLock.iWaiting = 1;
__NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
Check for an address space change. Not implemented for Win32, but useful as
documentation of the algorithm.
if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 || t->iAddressSpace != aS.iAddressSpace)
// what do we do now?
__NK_ASSERT_DEBUG(FALSE);
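#if 0
// Illustrative sketch only, not part of the build: a minimal model of how the
// initial candidate thread is found, assuming TScheduler's layout of a
// present-bits mask plus one circular ready list per priority (ExampleScheduler
// and ExampleThread are hypothetical stand-ins, not kernel types).
struct ExampleThread;
struct ExampleScheduler
	{
	unsigned long long iPresentMask;	// bit n set => ready thread(s) at priority n
	ExampleThread* iQueue[64];			// first ready thread at each priority
	};

static ExampleThread* ExampleFirst(const ExampleScheduler& aS)
	{
	int p = -1;
	for (unsigned long long m = aS.iPresentMask; m; m >>= 1)
		++p;								// index of the highest set bit
	return p < 0 ? 0 : aS.iQueue[p];		// highest-priority ready thread
	}
#endif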
TBool NThread::WakeUp()
// Wake up the thread. What to do depends on whether we were preempted or voluntarily
// rescheduled.
// Return TRUE if we need to immediately reschedule again because we had to unlock
// the kernel but there are DFCs pending. In this case, the thread does not wake up.
// NB. kernel is locked
__NK_ASSERT_ALWAYS(TheScheduler.iCurrentThread == this);
__NK_ASSERT_ALWAYS(SetEvent(iScheduleLock));
TheScheduler.iCurrentThread = this;
__NK_ASSERT_ALWAYS(SetEvent(iScheduleLock));
// The thread is Win32 suspended and must be resumed.
// A newly created thread does not need the kernel unlocked so we can
// just resume the suspended thread
__KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T",this));
TheScheduler.iCurrentThread = this;
if (TheScheduler.iProcessHandler)
(*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // new thread will need to have its static data updated
__NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) > 0); // check thread was previously suspended
case EResumeDiverted:
// The thread is Win32 suspended and must be resumed.
// The thread needs to be diverted, and does not need the kernel
// It's safe to divert the thread here because we called
// IsSafeToPreempt() when we suspended it - otherwise the diversion
__KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T (Resuming diverted thread)",this));
TheScheduler.iCurrentThread = this;
__NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) == 1);
// The thread is Win32 suspended and must be resumed.
// the complication here is that we have to unlock the kernel on behalf of the
// pre-empted thread. This means that we have to check to see if there are more DFCs
// pending or a reschedule required, as we unlock the kernel. That check is
// carried out with interrupts disabled.
// If so, we go back around the loop in this thread context
// Otherwise, we unlock the kernel (having marked us as not-preempted),
// enable interrupts and then resume the thread. If pre-emption occurs before the thread
// is resumed, it is the new thread that is pre-empted, not the running thread, so we are guaranteed
// to be able to call ResumeThread. If pre-emption occurs, and we are rescheduled to run before
// that occurs, we will once again be running with the kernel locked and the other thread will
// have been re-suspended by Win32: so all is well.
__KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T",this));
TInt irq = NKern::DisableAllInterrupts();
if (TheScheduler.iDfcPendingFlag || TheScheduler.iRescheduleNeededFlag)
// we were interrupted... back to the top
TheScheduler.iRescheduleNeededFlag = TRUE; // ensure we do the reschedule
TheScheduler.iCurrentThread = this;
if (TheScheduler.iProcessHandler)
(*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // threads resumed after interrupt or locks need to have static data updated
if (iInKernel == 0 && iUserModeCallbacks != NULL)
TheScheduler.iKernCSLocked = 0; // have to unlock the kernel on behalf of the new thread
TheScheduler.iCurrentThread = this;
NKern::RestoreInterrupts(irq);
__NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) > 0); // check thread was previously suspended
static void ThreadExit(NThread& aCurrent, NThread& aNext)
// The final context switch of a thread.
// Wake up the next thread and then destroy this one's Win32 resources.
// Return without terminating if we need to immediately reschedule again because
// we had to unlock the kernel but there are DFCs pending.
// the thread is dead
// extract win32 handles from dying NThread object before rescheduling
HANDLE sl = aCurrent.iScheduleLock;
HANDLE th = aCurrent.iWinThread;
// wake up the next thread
return; // need to re-reschedule in this thread
// we are now a vanilla win32 thread, nKern no longer knows about us
// release resources and exit cleanly
ExitThread(0); // does not return
#ifdef MONITOR_THREAD_CPU_TIME
static inline void UpdateThreadCpuTime(NThread& aCurrent, NThread& aNext)
TUint32 timestamp = NKern::FastCounter();
if (aCurrent.iLastStartTime)
aCurrent.iTotalCpuTime += timestamp - aCurrent.iLastStartTime;
aNext.iLastStartTime = timestamp;
static inline void UpdateThreadCpuTime(NThread& /*aCurrent*/, NThread& /*aNext*/)
static void SwitchThreads(NThread& aCurrent, NThread& aNext)
// The fundamental context switch - wake up the next thread and wait for reschedule
// trivially is aNext.WakeUp(), Wait(aCurrent.iScheduleLock), but we may be able to
// optimise the signal-and-wait
UpdateThreadCpuTime(aCurrent, aNext);
if (aCurrent.iNState == NThread::EDead)
ThreadExit(aCurrent, aNext);
else if (Win32AtomicSOAW && aNext.iWakeup==NThread::ERelease)
// special case optimization for normally blocked threads using atomic Win32 primitive
TheScheduler.iCurrentThread = &aNext;
DWORD result=SignalObjectAndWait(aNext.iScheduleLock,aCurrent.iScheduleLock, INFINITE, FALSE);
if (result != WAIT_OBJECT_0)
__NK_ASSERT_ALWAYS(result == 0xFFFFFFFF);
KPrintf("SignalObjectAndWait() failed with %d (%T->%T)",GetLastError(),&aCurrent,&aNext);
return; // need to re-reschedule in this thread
__NK_ASSERT_ALWAYS(WaitForSingleObject(aCurrent.iScheduleLock, INFINITE) == WAIT_OBJECT_0);
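#if 0
// Illustrative sketch only, not part of the build: the two handover shapes
// SwitchThreads() chooses between. SignalObjectAndWait() releases the next
// thread and blocks this one in a single atomic step; the fallback pair of
// calls leaves a window between the two operations, which the WakeUp()
// protocol above has to tolerate.
static void AtomicHandover(HANDLE aNextLock, HANDLE aMyLock)
	{
	SignalObjectAndWait(aNextLock, aMyLock, INFINITE, FALSE);	// atomic signal-and-wait
	}

static void NonAtomicHandover(HANDLE aNextLock, HANDLE aMyLock)
	{
	SetEvent(aNextLock);						// release the next thread...
	WaitForSingleObject(aMyLock, INFINITE);		// ...then block; preemption can intervene here
	}
#endif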
void TScheduler::YieldTo(NThreadBase*)
// Directed context switch to the nominated thread.
// Enter with kernel locked, exit with kernel unlocked but interrupts disabled.
TScheduler::Reschedule();
void TScheduler::Reschedule()
// Enter with kernel locked, exit with kernel unlocked, interrupts disabled.
// If the thread is dead do not return, but terminate the thread.
__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
NKern::DisableAllInterrupts();
if (TheScheduler.iDfcPendingFlag)
TheScheduler.QueueDfcs();
if (!TheScheduler.iRescheduleNeededFlag)
NKern::EnableAllInterrupts();
TheScheduler.iRescheduleNeededFlag = FALSE;
NThread* t = static_cast<NThread*>(SelectThread(TheScheduler));
__KTRACE_OPT(KSCHED,DEBUGPRINT("Reschedule->%T (%08x%08x)",t,TheScheduler.iPresent[1],TheScheduler.iPresent[0]));
#ifdef __EMI_SUPPORT__
EMI_AddTaskSwitchEvent(&me,t);
#ifdef BTRACE_CPU_USAGE
if(TheScheduler.iCpuUsageFilter)
TheScheduler.iBTraceHandler(BTRACE_HEADER_C(4,BTrace::ECpuUsage,BTrace::ENewThreadContext),0,(TUint32)t,0,0,0,0,0);
SwitchThreads(me, *t);
// we have just been scheduled to run... check for diversion/new Dfcs
NThread::TDivert divert = me.iDivert;
// diversion (e.g. force exit)
divert(); // does not return
if (TheScheduler.iProcessHandler)
(*ProcessHandler(TheScheduler.iProcessHandler))(me.iAddressSpace);
// interrupts are disabled, the kernel is still locked
TheScheduler.iKernCSLocked = 0;
/** Put the emulator into 'idle'.
This is called by the idle thread when there is nothing else to do.
EXPORT_C void NThread::Idle()
// Rather than spin, we go to sleep on the schedule lock. Preemption detects
// this state (Win32Idling) and pokes the event rather than diverting the thread.
// enter and exit with kernel locked
NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
__NK_ASSERT_ALWAYS(WaitForSingleObject(me.iScheduleLock, INFINITE) == WAIT_OBJECT_0);
// something happened, and we've been prodded by an interrupt
// the kernel was locked by the interrupt, and now reschedule
me.iWakeup = ERelease;
TScheduler::Reschedule();
NKern::EnableAllInterrupts();
void SchedulerInit(NThread& aInit)
// Initialise the win32 nKern scheduler
DWORD procaffin,sysaffin;
if (GetProcessAffinityMask(GetCurrentProcess(),&procaffin,&sysaffin))
switch (Win32SingleCpu)
// bind the emulator to a nominated CPU on the host PC
cpu = (1<<Win32SingleCpu);
if (!(sysaffin & cpu))
cpu = procaffin; // CPU selection invalid
case NThread::ECpuSingle:
// bind the emulator to a single CPU on the host PC, pick one
cpu = procaffin ^ (procaffin & (procaffin-1));
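// (procaffin & (procaffin-1)) clears the lowest set bit, so the XOR isolates it:
// the emulator is bound to the lowest-numbered CPU in its affinity mask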
case NThread::ECpuAll:
// run the emulator on all CPUs on the host PC
SetProcessAffinityMask(GetCurrentProcess(), cpu);
// identify if we can use the atomic SignalObjectAndWait API in Win32 for rescheduling
Win32AtomicSOAW = (SignalObjectAndWait(aInit.iScheduleLock, aInit.iScheduleLock, INFINITE, FALSE) == WAIT_OBJECT_0);
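// probe: atomically signal and wait on the init thread's own lock - if the call
// does not complete with WAIT_OBJECT_0 the scheduler falls back to separate
// SetEvent() + WaitForSingleObject() calls in SwitchThreads()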
// allocate the TLS used for thread identification, and set it for the init thread
TlsIndex = TlsAlloc();
__NK_ASSERT_ALWAYS(TlsIndex != TLS_OUT_OF_INDEXES);
SchedulerRegister(aInit);
Win32FindNonPreemptibleFunctions();
void SchedulerRegister(NThread& aSelf)
TlsSetValue(TlsIndex,&aSelf);
NThread* SchedulerThread()
if (TlsIndex != TLS_OUT_OF_INDEXES)
return static_cast<NThread*>(TlsGetValue(TlsIndex));
return NULL; // not yet initialised
inline TBool IsScheduledThread()
return SchedulerThread() == TheScheduler.iCurrentThread;
NThread& CheckedCurrentThread()
NThread* t = SchedulerThread();
__NK_ASSERT_ALWAYS(t == TheScheduler.iCurrentThread);
/** Disable normal 'interrupts'.
@param aLevel Ignored
@return Cookie to be passed into RestoreInterrupts()
EXPORT_C TInt NKern::DisableInterrupts(TInt /*aLevel*/)
return Interrupt.Mask();
/** Disable all maskable 'interrupts'.
@return Cookie to be passed into RestoreInterrupts()
EXPORT_C TInt NKern::DisableAllInterrupts()
return Interrupt.Mask();
/** Enable all maskable 'interrupts'
EXPORT_C void NKern::EnableAllInterrupts()
Interrupt.Restore(0);
/** Restore interrupt mask to state preceding a DisableInterrupts() call
@param aLevel Cookie returned by Disable(All)Interrupts()
EXPORT_C void NKern::RestoreInterrupts(TInt aLevel)
Interrupt.Restore(aLevel);
/** Unlocks the kernel.
Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
pending, calls the scheduler to process them.
@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
@pre Do not call from bare Win32 threads.
EXPORT_C void NKern::Unlock()
// using this coding sequence it is possible to call Reschedule unnecessarily
// if we are preempted after testing the flags (lock is zero at this point).
// However, in the common case this is much faster because 'disabling interrupts'
// can be very expensive.
CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Unlock");
__ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Unlock"); // check that we are a scheduled thread
__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0); // Can't unlock if it isn't locked!
if (--TheScheduler.iKernCSLocked == 0)
if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
TheScheduler.iKernCSLocked = 1;
TScheduler::Reschedule();
NKern::EnableAllInterrupts();
/** Locks the kernel.
Increments iKernCSLocked, thereby deferring IDFCs and preemption.
@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
@pre Do not call from bare Win32 threads.
EXPORT_C void NKern::Lock()
CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock");
__ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock"); // check that we are a scheduled thread
++TheScheduler.iKernCSLocked;
/** Locks the kernel and returns a pointer to the current thread
Increments iKernCSLocked, thereby deferring IDFCs and preemption.
@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
@pre Do not call from bare Win32 threads.
EXPORT_C NThread* NKern::LockC()
CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock");
__ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock"); // check that we are a scheduled thread
++TheScheduler.iKernCSLocked;
return (NThread*)TheScheduler.iCurrentThread;
/** Allows IDFCs and rescheduling if they are pending.
If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1
calls the scheduler to process the IDFCs and possibly reschedule.
@return Nonzero if a reschedule actually occurred, zero if not.
@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
@pre Do not call from bare Win32 threads.
EXPORT_C TInt NKern::PreemptionPoint()
CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::PreemptionPoint");
__ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::PreemptionPoint"); // check that we are a scheduled thread
if (TheScheduler.iKernCSLocked == 1 &&
(TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag))
TScheduler::Reschedule();
TheScheduler.iKernCSLocked = 1;
NKern::EnableAllInterrupts();
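#if 0
// Illustrative sketch only, not part of the build: typical use of the three
// calls documented above. A long-running kernel-side operation holds the lock
// for bounded stretches and lets pending IDFCs/reschedules run in between
// (DoSomeWork() is a hypothetical placeholder).
void ExampleLongOperation()
	{
	NKern::Lock();
	for (TInt i = 0; i < 1000; ++i)
		{
		DoSomeWork(i);				// IDFCs and preemption deferred while locked
		NKern::PreemptionPoint();	// iKernCSLocked is 1 here, so pending work can run
		}
	NKern::Unlock();
	}
#endif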
/** Mark the start of an 'interrupt' in the Win32 emulator.
This must be called in interrupt threads before using any other kernel APIs,
and should be paired with a call to EndOfInterrupt().
@pre Win32 'interrupt' thread context
EXPORT_C void StartOfInterrupt()
__ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(),"Win32 'interrupt' thread context","StartOfInterrupt"); // check that we are not a scheduled thread
/** Mark the end of an 'interrupt' in the Win32 emulator.
This checks to see if we need to reschedule.
@pre Win32 'interrupt' thread context
EXPORT_C void EndOfInterrupt()
__ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(),"Win32 'interrupt' thread context","EndOfInterrupt"); // check that we are not a scheduled thread
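#if 0
// Illustrative sketch only, not part of the build: the shape of an emulated
// 'interrupt' raised from a bare Win32 thread, pairing the two calls above
// (QueueSomeDfc() is a hypothetical placeholder for the ISR's real work).
void ExampleWin32InterruptThread()
	{
	StartOfInterrupt();		// suspends the currently scheduled Symbian thread
	QueueSomeDfc();			// e.g. queue a DFC to signal the event to the kernel
	EndOfInterrupt();		// reschedules now if the ISR made that necessary
	}
#endif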
void Win32Interrupt::Init()
iQ=CreateSemaphoreA(NULL, 0, KMaxTInt, NULL);
__NK_ASSERT_ALWAYS(iQ);
// create the NThread which exists solely to service reschedules for interrupts
// this makes the End() much simpler as it merely needs to kick this thread
SNThreadCreateInfo ni;
memclr(&ni, sizeof(ni));
ni.iFunction=&Reschedule;
NKern::ThreadCreate(&iScheduler, ni);
TScheduler::YieldTo(&iScheduler);
TInt Win32Interrupt::Mask()
return 0; // interrupt scheme not enabled yet
DWORD id=GetCurrentThreadId();
if (__e32_atomic_add_ord32(&iLock, 1))
__NK_ASSERT_ALWAYS(WaitForSingleObject(iQ,INFINITE) == WAIT_OBJECT_0);
iRescheduleOnExit=IsScheduledThread() &&
(TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag);
iRescheduleOnExit=FALSE;
__NK_ASSERT_ALWAYS(iOwner==0 && iLevel==0);
void Win32Interrupt::Restore(TInt aLevel)
return; // interrupt scheme not enabled yet
DWORD id=GetCurrentThreadId();
__NK_ASSERT_ALWAYS(id == iOwner);
TInt count = iLevel - aLevel;
return; // already restored to that level
TBool reschedule = FALSE;
iLevel = aLevel; // update this value before releasing the lock
// we release the lock
if (iRescheduleOnExit && TheScheduler.iKernCSLocked == 0)
reschedule = TRUE; // need to trigger reschedule on full release
// now release the lock
if (__e32_atomic_add_ord32(&iLock, TUint32(-count)) == (TUint32)count)
{ // fully released, check for reschedule
{ // not fully released
__NK_ASSERT_ALWAYS(ReleaseSemaphore(iQ,1,NULL));
// unlocked everything but a reschedule may be required
TheScheduler.iKernCSLocked = 1;
TScheduler::Reschedule();
// return with the kernel unlocked, but interrupts disabled
// instead of going recursive with a call to EnableAllInterrupts() we iterate
void Win32Interrupt::Begin()
__NK_ASSERT_ALWAYS(iInterrupted==0); // check we haven't done this already
__NK_ASSERT_ALWAYS(!IsScheduledThread()); // check that we aren't a scheduled thread
pC=static_cast<NThread*>(TheScheduler.iCurrentThread);
DWORD r=SuspendThread(pC->iWinThread);
if (pC == TheScheduler.iCurrentThread)
// there was no race while suspending the thread, so we can carry on
__NK_ASSERT_ALWAYS(r != 0xffffffff);
// We suspended the thread while doing a context switch, resume it and try again
__NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0); // check thread was previously suspended
#ifdef BTRACE_CPU_USAGE
BTrace0(BTrace::ECpuUsage,BTrace::EIrqStart);
void Win32Interrupt::End()
__NK_ASSERT_ALWAYS(iOwner == GetCurrentThreadId()); // check we are the interrupting thread
NThread* pC = iInterrupted;
__NK_ASSERT_ALWAYS(pC==TheScheduler.iCurrentThread);
if (iLock == 1 && TheScheduler.iKernCSLocked == 0 &&
(TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) &&
pC->IsSafeToPreempt())
TheScheduler.iKernCSLocked = 1; // prevent further pre-emption
if (pC->iWakeup == NThread::EIdle)
// wake up the NULL thread, it will always reschedule immediately
// pre-empt the current thread and poke the 'scheduler' thread
__NK_ASSERT_ALWAYS(pC->iWakeup == NThread::ERelease);
pC->iWakeup = NThread::EResume;
UpdateThreadCpuTime(*pC, iScheduler);
NKern::EnableAllInterrupts();
// no thread reschedule, so emit trace...
#ifdef BTRACE_CPU_USAGE
BTrace0(BTrace::ECpuUsage,BTrace::EIrqEnd);
if (((NThread*)pC)->iInKernel == 0 && // thread is running in user mode
pC->iUserModeCallbacks != NULL && // and has callbacks queued
TheScheduler.iKernCSLocked == 0 && // and is not currently processing a diversion
pC->IsSafeToPreempt()) // and can be safely preempted at this point
TheScheduler.iKernCSLocked = 1;
pC->ApplyDiversion();
NKern::EnableAllInterrupts();
__NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0); // check thread was previously suspended
void Win32Interrupt::Reschedule(TAny*)
// The entry-point for the interrupt-rescheduler thread.
// This spends its whole life going around the TScheduler::Reschedule() loop
// selecting another thread to run.
TheScheduler.iKernCSLocked = 1;
TScheduler::Reschedule();
void Win32Interrupt::ForceReschedule()
void SchedulerEscape()
NThread& me=CheckedCurrentThread();
__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0); // Can't call Escape() with the Emulator/kernel already locked
NKern::ThreadEnterCS();
me.iNState=NThreadBase::EBlocked;
TheScheduler.Remove(&me);
me.iWakeup=NThread::EEscaped;
SetThreadPriority(me.iWinThread,THREAD_PRIORITY_ABOVE_NORMAL);
Interrupt.ForceReschedule(); // schedules some other thread so we can carry on outside the scheduler domain
// this will change the value of iCurrentThread to ensure the 'escaped' invariants are set
void ReenterDfc(TAny* aPtr)
NThread& me = *static_cast<NThread*>(aPtr);
me.iWakeup = NThread::ERelease;
me.CheckSuspendThenReady();
void SchedulerReenter()
NThread* me=SchedulerThread();
__NK_ASSERT_ALWAYS(me);
__NK_ASSERT_ALWAYS(me->iWakeup == NThread::EEscaped);
TDfc idfc(&ReenterDfc, me);
SetThreadPriority(me->iWinThread,THREAD_PRIORITY_NORMAL);
__NK_ASSERT_ALWAYS(WaitForSingleObject(me->iScheduleLock, INFINITE) == WAIT_OBJECT_0);
// when released, the kernel is locked and handed over to us
// need to complete the reschedule protocol in this thread now
TScheduler::Reschedule();
NKern::EnableAllInterrupts();
NKern::ThreadLeaveCS();
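#if 0
// Illustrative sketch only, not part of the build: why Escape()/Reenter()
// exist. A thread about to block in native Win32 code leaves the scheduler
// domain first, so the emulator can keep scheduling other Symbian threads
// while it is blocked (aNativeObject is a hypothetical handle).
void ExampleNativeBlockingCall(HANDLE aNativeObject)
	{
	SchedulerEscape();								// become a bare Win32 thread
	WaitForSingleObject(aNativeObject, INFINITE);	// safe: outside the scheduler domain
	SchedulerReenter();								// rejoin the ready list before continuing
	}
#endif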
/** Return the current processor context type
(thread, IDFC, interrupt or escaped thread)
@return A value from NKern::TContext enumeration (including EEscaped)
EXPORT_C TInt NKern::CurrentContext()
NThread* t = SchedulerThread();
return NKern::EInterrupt;
if (TheScheduler.iInIDFC)
if (t->iWakeup == NThread::EEscaped)
return NKern::EEscaped;
__NK_ASSERT_ALWAYS(NKern::Crashed() || t == TheScheduler.iCurrentThread);
return NKern::EThread;
// We use SuspendThread and ResumeThread to preempt threads. This can cause
// deadlock if the thread is using windows synchronisation primitives (eg
// critical sections). This isn't too much of a problem most of the time,
// because threads generally use the symbian environment rather than the native
// windows APIs. However exceptions are an issue - they can happen at any time,
// and cause execution of native windows code over which we have no control.
// To work around this we examine the call stack to see if the thread is inside
// one of the windows exception handling functions. If so, preemption is
// not attempted.
const TInt KWin32NonPreemptibleFunctionCount = 2;
struct TWin32FunctionInfo
static TWin32FunctionInfo Win32NonPreemptibleFunctions[KWin32NonPreemptibleFunctionCount];
TWin32FunctionInfo Win32FindExportedFunction(const char* aModuleName, const char* aFunctionName)
HMODULE library = GetModuleHandleA(aModuleName);
__NK_ASSERT_ALWAYS(library != NULL);
// Find the start address of the function
TUint start = (TUint)GetProcAddress(library, aFunctionName);
__NK_ASSERT_ALWAYS(start);
// Now have to check all other exports to find the end of the function
TUint end = 0xffffffff;
TUint addr = (TUint)GetProcAddress(library, MAKEINTRESOURCEA(i));
if (addr > start && addr < end)
__NK_ASSERT_ALWAYS(end != 0xffffffff);
TWin32FunctionInfo result = { start, end - start };
void Win32FindNonPreemptibleFunctions()
Win32NonPreemptibleFunctions[0] = Win32FindExportedFunction("kernel32.dll", "RaiseException");
Win32NonPreemptibleFunctions[1] = Win32FindExportedFunction("ntdll.dll", "KiUserExceptionDispatcher");
TBool Win32IsThreadInNonPreemptibleFunction(HANDLE aWinThread, TLinAddr aStackTop)
const TInt KMaxSearchDepth = 16; // 12 max observed while handling exceptions
const TInt KMaxStackSize = 1024 * 1024; // Default reserved stack size on windows
const TInt KMaxFrameSize = 4096;
c.ContextFlags=CONTEXT_FULL;
GetThreadContext(aWinThread, &c);
TUint lastEbp = c.Esp;
// Walk the call stack
for (TInt i = 0 ; i < KMaxSearchDepth ; ++i)
for (TInt j = 0 ; j < KWin32NonPreemptibleFunctionCount ; ++j)
const TWin32FunctionInfo& info = Win32NonPreemptibleFunctions[j];
if (TUint(eip - info.iStartAddr) < info.iLength)
__KTRACE_OPT(KSCHED, DEBUGPRINT("Thread is in non-preemptible function %d at frame %d: eip == %08x", j, i, eip));
// Check frame pointer is valid before dereferencing it
if (TUint(aStackTop - ebp) > KMaxStackSize || TUint(ebp - lastEbp) > KMaxFrameSize || ebp & 3)
TUint* frame = (TUint*)ebp;
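// standard x86 frame layout: frame[0] holds the caller's saved EBP and frame[1]
// the return address, so following frame[0] walks one frame up the stack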
TBool NThread::IsSafeToPreempt()
return !Win32IsThreadInNonPreemptibleFunction(iWinThread, iUserStackBase);
TInt& k=CheckedCurrentThread().iInKernel;
__NK_ASSERT_DEBUG(k>0);
if (k==1) // just about to leave kernel
NThread& t = CheckedCurrentThread();
__NK_ASSERT_ALWAYS(t.iCsCount==0);
__NK_ASSERT_ALWAYS(t.iHeldFastMutex==0);
__NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0);
NKern::DisableAllInterrupts();
t.CallUserModeCallbacks();
NKern::EnableAllInterrupts();