1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/nkern/win32/ncsched.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,942 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\nkern\win32\ncsched.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +// NThreadBase member data
1.22 +#define __INCLUDE_NTHREADBASE_DEFINES__
1.23 +
1.24 +#include <e32cmn.h>
1.25 +#include <e32cmn_private.h>
1.26 +#include "nk_priv.h"
1.27 +
1.28 +#ifdef __EMI_SUPPORT__
1.29 +extern void EMI_AddTaskSwitchEvent(TAny* aPrevious, TAny* aNext);
1.30 +extern void EMI_CheckDfcTag(TAny* aNext);
1.31 +#endif
1.32 +typedef void (*ProcessHandler)(TAny* aAddressSpace);
1.33 +
1.34 +static DWORD TlsIndex = TLS_OUT_OF_INDEXES;
1.35 +
1.36 +static NThreadBase* SelectThread(TScheduler& aS)
1.37 +//
1.38 +// Select the next thread to run.
1.39 +// This is the heart of the rescheduling algorithm.
1.40 +//
1.41 + {
1.42 + NThreadBase* t = static_cast<NThreadBase*>(aS.First());
1.43 + __NK_ASSERT_DEBUG(t);
1.44 +#ifdef _DEBUG
1.45 + if (t->iHeldFastMutex)
1.46 + {
1.47 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched init->%T, Holding %M",t,t->iHeldFastMutex));
1.48 + }
1.49 + else
1.50 + {
1.51 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched init->%T",t));
1.52 + }
1.53 +#endif
1.54 + if (t->iTime == 0 && !t->Alone())
1.55 + {
1.56 + // round robin
1.57 + // get here if thread's timeslice has expired and there is another
1.58 + // thread ready at the same priority
1.59 + if (t->iHeldFastMutex)
1.60 + {
1.61 + // round-robin deferred due to fast mutex held
1.62 + t->iHeldFastMutex->iWaiting = 1;
1.63 + return t;
1.64 + }
1.65 + t->iTime = t->iTimeslice; // reset old thread time slice
1.66 + t = static_cast<NThreadBase*>(t->iNext); // next thread
1.67 + aS.iQueue[t->iPriority] = t; // make it first in list
1.68 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("RoundRobin->%T",t));
1.69 + }
1.70 + if (t->iHeldFastMutex)
1.71 + {
1.72 + if (t->iHeldFastMutex == &aS.iLock)
1.73 + {
1.74 + // thread holds system lock: use it
1.75 + return t;
1.76 + }
1.77 + if ((t->i_ThrdAttr & KThreadAttImplicitSystemLock) != 0 && aS.iLock.iHoldingThread)
1.78 + t->iHeldFastMutex->iWaiting = 1;
1.79 + __NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
1.80 +/*
1.81 + Check for an address space change. Not implemented for Win32, but useful as
1.82 +	documentation of the algorithm.
1.83 +
1.84 + if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 && t->iAddressSpace != aS.iAddressSpace)
1.85 + t->iHeldFastMutex->iWaiting = 1;
1.86 +*/
1.87 + }
1.88 + else if (t->iWaitFastMutex && t->iWaitFastMutex->iHoldingThread)
1.89 + {
1.90 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched inter->%T, Blocked on %M",t->iWaitFastMutex->iHoldingThread,t->iWaitFastMutex));
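+		// schedule the holding thread instead, so it can release the fast mutex as
+		// soon as possible (the same effect as priority inheritance)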
1.91 + t = t->iWaitFastMutex->iHoldingThread;
1.92 + }
1.93 + else if (t->i_ThrdAttr & KThreadAttImplicitSystemLock)
1.94 + {
1.95 + // implicit system lock required
1.96 + if (aS.iLock.iHoldingThread)
1.97 + {
1.98 + // system lock held, switch to that thread
1.99 + t = aS.iLock.iHoldingThread;
1.100 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("Resched inter->%T (IMP SYS)",t));
1.101 + t->iHeldFastMutex->iWaiting = 1; // aS.iLock.iWaiting = 1;
1.102 + return t;
1.103 + }
1.104 + __NK_ASSERT_DEBUG((t->i_ThrdAttr & KThreadAttAddressSpace) == 0);
1.105 +/*
1.106 + Check for an address space change. Not implemented for Win32, but useful as
1.107 +	documentation of the algorithm.
1.108 +
1.109 +	if ((t->i_ThrdAttr & KThreadAttAddressSpace) != 0 && t->iAddressSpace != aS.iAddressSpace)
1.110 + {
1.111 + // what do we do now?
1.112 + __NK_ASSERT_DEBUG(FALSE);
1.113 + }
1.114 +*/
1.115 + }
1.116 + return t;
1.117 + }
1.118 +
1.119 +// from NThread
1.120 +#undef i_ThrdAttr
1.121 +
1.122 +TBool NThread::WakeUp()
1.123 +//
1.124 +// Wake up the thread. What to do depends on whether we were preempted or voluntarily
1.125 +// rescheduled.
1.126 +//
1.127 +// Return TRUE if we need to immediately reschedule again because we had to unlock
1.128 +// the kernel but there are DFCs pending. In this case, the thread does not wake up.
1.129 +//
1.130 +// NB. kernel is locked
1.131 +//
1.132 + {
1.133 + switch (iWakeup)
1.134 + {
1.135 + default:
1.136 + FAULT();
1.137 + case EIdle:
1.138 + __NK_ASSERT_ALWAYS(TheScheduler.iCurrentThread == this);
1.139 + __NK_ASSERT_ALWAYS(SetEvent(iScheduleLock));
1.140 + break;
1.141 + case ERelease:
1.142 + TheScheduler.iCurrentThread = this;
1.143 + __NK_ASSERT_ALWAYS(SetEvent(iScheduleLock));
1.144 + break;
1.145 + case EResumeLocked:
1.146 + // The thread is Win32 suspended and must be resumed.
1.147 + //
1.148 + // A newly created thread does not need the kernel unlocked so we can
1.149 + // just resume the suspended thread
1.150 + //
1.151 + __KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T",this));
1.152 + iWakeup = ERelease;
1.153 + TheScheduler.iCurrentThread = this;
1.154 + if (TheScheduler.iProcessHandler)
1.155 + (*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // new thread will need to have its static data updated
1.156 + __NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) > 0); // check thread was previously suspended
1.157 + break;
1.158 + case EResumeDiverted:
1.159 + // The thread is Win32 suspended and must be resumed.
1.160 + //
1.161 + // The thread needs to be diverted, and does not need the kernel
1.162 + // unlocked.
1.163 + //
1.164 +		// It's safe to divert the thread here because we called
1.165 + // IsSafeToPreempt() when we suspended it - otherwise the diversion
1.166 + // could get lost.
1.167 + //
1.168 + __KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T (Resuming diverted thread)",this));
1.169 + iWakeup = ERelease;
1.170 + ApplyDiversion();
1.171 + TheScheduler.iCurrentThread = this;
1.172 + __NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) == 1);
1.173 + break;
1.174 + case EResume:
1.175 + // The thread is Win32 suspended and must be resumed.
1.176 + //
1.177 + // the complication here is that we have to unlock the kernel on behalf of the
1.178 + // pre-empted thread. This means that we have to check to see if there are more DFCs
1.179 + // pending or a reschedule required, as we unlock the kernel. That check is
1.180 + // carried out with interrupts disabled.
1.181 + //
1.182 + // If so, we go back around the loop in this thread context
1.183 + //
1.184 + // Otherwise, we unlock the kernel (having marked us as not-preempted),
1.185 + // enable interrupts and then resume the thread. If pre-emption occurs before the thread
1.186 + // is resumed, it is the new thread that is pre-empted, not the running thread, so we are guaranteed
1.187 + // to be able to call ResumeThread. If pre-emption occurs, and we are rescheduled to run before
1.188 +	// the resume takes place, we will once again be running with the kernel locked and the other thread will
1.189 + // have been re-suspended by Win32: so all is well.
1.190 + //
1.191 + {
1.192 + __KTRACE_OPT(KSCHED,DEBUGPRINT("Win32Resume->%T",this));
1.193 + TInt irq = NKern::DisableAllInterrupts();
1.194 + if (TheScheduler.iDfcPendingFlag || TheScheduler.iRescheduleNeededFlag)
1.195 + {
1.196 +			// we were interrupted... back to the top
1.197 + TheScheduler.iRescheduleNeededFlag = TRUE; // ensure we do the reschedule
1.198 + return TRUE;
1.199 + }
1.200 + iWakeup = ERelease;
1.201 + TheScheduler.iCurrentThread = this;
1.202 + if (TheScheduler.iProcessHandler)
1.203 + (*ProcessHandler(TheScheduler.iProcessHandler))(iAddressSpace); // threads resumed after interrupt or locks need to have static data updated
1.204 +
1.205 + if (iInKernel == 0 && iUserModeCallbacks != NULL)
1.206 + ApplyDiversion();
1.207 + else
1.208 + TheScheduler.iKernCSLocked = 0; // have to unlock the kernel on behalf of the new thread
1.209 +
1.210 + TheScheduler.iCurrentThread = this;
1.211 + NKern::RestoreInterrupts(irq);
1.212 + __NK_ASSERT_ALWAYS(TInt(ResumeThread(iWinThread)) > 0); // check thread was previously suspended
1.213 + }
1.214 + break;
1.215 + }
1.216 + return FALSE;
1.217 + }
1.218 +
1.219 +static void ThreadExit(NThread& aCurrent, NThread& aNext)
1.220 +//
1.221 +// The final context switch of a thread.
1.222 +// Wake up the next thread and then destroy this one's Win32 resources.
1.223 +//
1.224 +// Return without terminating if we need to immediately reschedule again because
1.225 +// we had to unlock the kernel but there are DFCs pending.
1.226 +//
1.227 + {
1.228 + // the thread is dead
1.229 + // extract win32 handles from dying NThread object before rescheduling
1.230 + HANDLE sl = aCurrent.iScheduleLock;
1.231 + HANDLE th = aCurrent.iWinThread;
1.232 +
1.233 + // wake up the next thread
1.234 + if (aNext.WakeUp())
1.235 + return; // need to re-reschedule in this thread
1.236 +
1.237 + // we are now a vanilla win32 thread, nKern no longer knows about us
1.238 + // release resources and exit cleanly
1.239 + CloseHandle(sl);
1.240 + CloseHandle(th);
1.241 + ExitThread(0); // does not return
1.242 + }
1.243 +
1.244 +#ifdef MONITOR_THREAD_CPU_TIME
1.245 +static inline void UpdateThreadCpuTime(NThread& aCurrent, NThread& aNext)
1.246 + {
1.247 + TUint32 timestamp = NKern::FastCounter();
1.248 + if (aCurrent.iLastStartTime)
1.249 + aCurrent.iTotalCpuTime += timestamp - aCurrent.iLastStartTime;
1.250 + aNext.iLastStartTime = timestamp;
1.251 + }
1.252 +#else
1.253 +static inline void UpdateThreadCpuTime(NThread& /*aCurrent*/, NThread& /*aNext*/)
1.254 + {
1.255 + }
1.256 +#endif
1.257 +
1.258 +static void SwitchThreads(NThread& aCurrent, NThread& aNext)
1.259 +//
1.260 +// The fundamental context switch - wake up the next thread and wait for reschedule.
1.261 +// Trivially this is aNext.WakeUp() followed by Wait(aCurrent.iScheduleLock), but we may
1.262 +// be able to optimise the signal-and-wait into a single atomic operation
1.263 +//
1.264 + {
1.265 + UpdateThreadCpuTime(aCurrent, aNext);
1.266 + if (aCurrent.iNState == NThread::EDead)
1.267 + ThreadExit(aCurrent, aNext);
1.268 + else if (Win32AtomicSOAW && aNext.iWakeup==NThread::ERelease)
1.269 + {
1.270 + // special case optimization for normally blocked threads using atomic Win32 primitive
1.271 + TheScheduler.iCurrentThread = &aNext;
1.272 + DWORD result=SignalObjectAndWait(aNext.iScheduleLock,aCurrent.iScheduleLock, INFINITE, FALSE);
1.273 + if (result != WAIT_OBJECT_0)
1.274 + {
1.275 + __NK_ASSERT_ALWAYS(result == 0xFFFFFFFF);
1.276 + KPrintf("SignalObjectAndWait() failed with %d (%T->%T)",GetLastError(),&aCurrent,&aNext);
1.277 + FAULT();
1.278 + }
1.279 + }
1.280 + else
1.281 + {
1.282 + if (aNext.WakeUp())
1.283 + return; // need to re-reschedule in this thread
1.284 + __NK_ASSERT_ALWAYS(WaitForSingleObject(aCurrent.iScheduleLock, INFINITE) == WAIT_OBJECT_0);
1.285 + }
1.286 + }
1.287 +
1.288 +void TScheduler::YieldTo(NThreadBase*)
1.289 +//
1.290 +// Directed context switch to the nominated thread.
1.291 +// Enter with kernel locked, exit with kernel unlocked but interrupts disabled.
1.292 +//
1.293 + {
1.294 + RescheduleNeeded();
1.295 + TScheduler::Reschedule();
1.296 + }
1.297 +
1.298 +void TScheduler::Reschedule()
1.299 +//
1.300 +// Enter with kernel locked, exit with kernel unlocked, interrupts disabled.
1.301 +// If the thread is dead do not return, but terminate the thread.
1.302 +//
1.303 + {
1.304 + __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked == 1);
1.305 + NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
1.306 + for (;;)
1.307 + {
1.308 + NKern::DisableAllInterrupts();
1.309 + if (TheScheduler.iDfcPendingFlag)
1.310 + TheScheduler.QueueDfcs();
1.311 + if (!TheScheduler.iRescheduleNeededFlag)
1.312 + break;
1.313 + NKern::EnableAllInterrupts();
1.314 + TheScheduler.iRescheduleNeededFlag = FALSE;
1.315 + NThread* t = static_cast<NThread*>(SelectThread(TheScheduler));
1.316 + __KTRACE_OPT(KSCHED,DEBUGPRINT("Reschedule->%T (%08x%08x)",t,TheScheduler.iPresent[1],TheScheduler.iPresent[0]));
1.317 +#ifdef __EMI_SUPPORT__
1.318 + EMI_AddTaskSwitchEvent(&me,t);
1.319 + EMI_CheckDfcTag(t);
1.320 +#endif
1.321 +#ifdef BTRACE_CPU_USAGE
1.322 + if(TheScheduler.iCpuUsageFilter)
1.323 + TheScheduler.iBTraceHandler(BTRACE_HEADER_C(4,BTrace::ECpuUsage,BTrace::ENewThreadContext),0,(TUint32)t,0,0,0,0,0);
1.324 +#endif
1.325 + SwitchThreads(me, *t);
1.326 +
1.327 + // we have just been scheduled to run... check for diversion/new Dfcs
1.328 + NThread::TDivert divert = me.iDivert;
1.329 + if (divert)
1.330 + {
1.331 + // diversion (e.g. force exit)
1.332 + me.iDivert = NULL;
1.333 + divert(); // does not return
1.334 + }
1.335 + }
1.336 + if (TheScheduler.iProcessHandler)
1.337 + (*ProcessHandler(TheScheduler.iProcessHandler))(me.iAddressSpace);
1.338 +	// interrupts are disabled, the kernel is still locked
1.339 + TheScheduler.iKernCSLocked = 0;
1.340 + }
1.341 +
1.342 +/** Put the emulator into 'idle'.
1.343 + This is called by the idle thread when there is nothing else to do.
1.344 +
1.345 + @internalTechnology
1.346 + */
1.347 +EXPORT_C void NThread::Idle()
1.348 +//
1.349 +// Rather than spin, we go to sleep on the schedule lock. Preemption detects
1.350 +// this state (iWakeup == EIdle) and pokes the event rather than diverting the thread.
1.351 +//
1.352 +// enter and exit with kernel locked
1.353 +//
1.354 + {
1.355 + NThread& me = *static_cast<NThread*>(TheScheduler.iCurrentThread);
1.356 + me.iWakeup = EIdle;
1.357 + __NK_ASSERT_ALWAYS(WaitForSingleObject(me.iScheduleLock, INFINITE) == WAIT_OBJECT_0);
1.358 +	// something happened and we've been prodded by an interrupt;
1.359 +	// the kernel was locked by the interrupt, so now reschedule
1.360 + me.iWakeup = ERelease;
1.361 + TScheduler::Reschedule();
1.362 + NKern::EnableAllInterrupts();
1.363 + }
1.364 +
1.365 +void SchedulerInit(NThread& aInit)
1.366 +//
1.367 +// Initialise the win32 nKern scheduler
1.368 +//
1.369 + {
1.370 + DWORD procaffin,sysaffin;
1.371 + if (GetProcessAffinityMask(GetCurrentProcess(),&procaffin,&sysaffin))
1.372 + {
1.373 + DWORD cpu;
1.374 + switch (Win32SingleCpu)
1.375 + {
1.376 + default:
1.377 + // bind the emulator to a nominated CPU on the host PC
1.378 + cpu = (1<<Win32SingleCpu);
1.379 + if (!(sysaffin & cpu))
1.380 + cpu = procaffin; // CPU selection invalid
1.381 + break;
1.382 + case NThread::ECpuSingle:
1.383 + // bind the emulator to a single CPU on the host PC, pick one
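+			// procaffin & (procaffin-1) clears the lowest set bit of the process
+			// affinity, so the XOR isolates that bit (e.g. 0x0C -> 0x04)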
1.384 + cpu = procaffin ^ (procaffin & (procaffin-1));
1.385 + break;
1.386 + case NThread::ECpuAll:
1.387 + // run the emulator on all CPUs on the host PC
1.388 + cpu=sysaffin;
1.389 + break;
1.390 + }
1.391 + SetProcessAffinityMask(GetCurrentProcess(), cpu);
1.392 + }
1.393 + // identify if we can use the atomic SignalObjectAndWait API in Win32 for rescheduling
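+	// signalling our own schedule lock and atomically waiting on the same auto-reset
+	// event consumes the signal straight away, so success proves the API works here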
1.394 + Win32AtomicSOAW = (SignalObjectAndWait(aInit.iScheduleLock, aInit.iScheduleLock, INFINITE, FALSE) == WAIT_OBJECT_0);
1.395 + //
1.396 + // allocate the TLS used for thread identification, and set it for the init thread
1.397 + TlsIndex = TlsAlloc();
1.398 + __NK_ASSERT_ALWAYS(TlsIndex != TLS_OUT_OF_INDEXES);
1.399 + SchedulerRegister(aInit);
1.400 + //
1.401 + Interrupt.Init();
1.402 +
1.403 + Win32FindNonPreemptibleFunctions();
1.404 + }
1.405 +
1.406 +void SchedulerRegister(NThread& aSelf)
1.407 + {
1.408 + TlsSetValue(TlsIndex,&aSelf);
1.409 + }
1.410 +
1.411 +NThread* SchedulerThread()
1.412 + {
1.413 + if (TlsIndex != TLS_OUT_OF_INDEXES)
1.414 + return static_cast<NThread*>(TlsGetValue(TlsIndex));
1.415 + else
1.416 + return NULL; // not yet initialised
1.417 + }
1.418 +
1.419 +inline TBool IsScheduledThread()
1.420 + {
1.421 + return SchedulerThread() == TheScheduler.iCurrentThread;
1.422 + }
1.423 +
1.424 +NThread& CheckedCurrentThread()
1.425 + {
1.426 + NThread* t = SchedulerThread();
1.427 + __NK_ASSERT_ALWAYS(t == TheScheduler.iCurrentThread);
1.428 + return *t;
1.429 + }
1.430 +
1.431 +
1.432 +/** Disable normal 'interrupts'.
1.433 +
1.434 + @param aLevel Ignored
1.435 + @return Cookie to be passed into RestoreInterrupts()
1.436 + */
1.437 +EXPORT_C TInt NKern::DisableInterrupts(TInt /*aLevel*/)
1.438 + {
1.439 + return Interrupt.Mask();
1.440 + }
1.441 +
1.442 +
1.443 +/** Disable all maskable 'interrupts'.
1.444 +
1.445 + @return Cookie to be passed into RestoreInterrupts()
1.446 + */
1.447 +EXPORT_C TInt NKern::DisableAllInterrupts()
1.448 + {
1.449 + return Interrupt.Mask();
1.450 + }
1.451 +
1.452 +
1.453 +/** Enable all maskable 'interrupts'
1.454 +
1.455 + @internalComponent
1.456 + */
1.457 +EXPORT_C void NKern::EnableAllInterrupts()
1.458 + {
1.459 + Interrupt.Restore(0);
1.460 + }
1.461 +
1.462 +
1.463 +/** Restore interrupt mask to state preceding a DisableInterrupts() call
1.464 +
1.465 + @param aLevel Cookie returned by Disable(All)Interrupts()
1.466 + */
1.467 +EXPORT_C void NKern::RestoreInterrupts(TInt aLevel)
1.468 + {
1.469 + Interrupt.Restore(aLevel);
1.470 + }
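+
+// An illustrative usage sketch (not part of this file): save the current mask,
+// run code that must not be 'interrupted', then restore it.
+//
+//     TInt irq = NKern::DisableAllInterrupts();
+//     // ... touch state shared with 'interrupt' threads ...
+//     NKern::RestoreInterrupts(irq);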
1.471 +
1.472 +
1.473 +/** Unlocks the kernel.
1.474 +
1.475 + Decrements iKernCSLocked; if it becomes zero and IDFCs or a reschedule are
1.476 + pending, calls the scheduler to process them.
1.477 +
1.478 + @pre Call either in a thread or an IDFC context.
1.479 + @pre Do not call from an ISR.
1.480 + @pre Do not call from bare Win32 threads.
1.481 + */
1.482 +EXPORT_C void NKern::Unlock()
1.483 +//
1.484 +// using this coding sequence it is possible to call Reschedule unnecessarily
1.485 +// if we are preempted after testing the flags (lock is zero at this point).
1.486 +// However, in the common case this is much faster because 'disabling interrupts'
1.487 +// can be very expensive.
1.488 +//
1.489 + {
1.490 + CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Unlock");
1.491 + __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Unlock"); // check that we are a scheduled thread
1.492 + __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked > 0); // Can't unlock if it isn't locked!
1.493 + if (--TheScheduler.iKernCSLocked == 0)
1.494 + {
1.495 + if (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag)
1.496 + {
1.497 + TheScheduler.iKernCSLocked = 1;
1.498 + TScheduler::Reschedule();
1.499 + NKern::EnableAllInterrupts();
1.500 + }
1.501 + }
1.502 + }
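+
+// An illustrative usage sketch (not part of this file): defer IDFCs and preemption
+// around a short critical region; any pending reschedule runs at the final Unlock().
+//
+//     NKern::Lock();
+//     // ... manipulate nkern state ...
+//     NKern::Unlock();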
1.503 +
1.504 +
1.505 +/** Locks the kernel.
1.506 +
1.507 + Increments iKernCSLocked, thereby deferring IDFCs and preemption.
1.508 +
1.509 + @pre Call either in a thread or an IDFC context.
1.510 + @pre Do not call from an ISR.
1.511 + @pre Do not call from bare Win32 threads.
1.512 + */
1.513 +EXPORT_C void NKern::Lock()
1.514 + {
1.515 + CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::Lock");
1.516 + __ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::Lock"); // check that we are a scheduled thread
1.517 + ++TheScheduler.iKernCSLocked;
1.518 + }
1.519 +
1.520 +
1.521 +/** Locks the kernel and returns a pointer to the current thread
1.522 + Increments iKernCSLocked, thereby deferring IDFCs and preemption.
1.523 +
1.524 + @pre Call either in a thread or an IDFC context.
1.525 + @pre Do not call from an ISR.
1.526 + @pre Do not call from bare Win32 threads.
1.527 + */
1.528 +EXPORT_C NThread* NKern::LockC()
1.529 + {
1.530 +	CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::LockC");
1.531 +	__ASSERT_WITH_MESSAGE_ALWAYS(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::LockC");	// check that we are a scheduled thread
1.532 + ++TheScheduler.iKernCSLocked;
1.533 + return (NThread*)TheScheduler.iCurrentThread;
1.534 + }
1.535 +
1.536 +
1.537 +/** Allows IDFCs and rescheduling if they are pending.
1.538 +
1.539 + If IDFCs or a reschedule are pending and iKernCSLocked is exactly equal to 1
1.540 + calls the scheduler to process the IDFCs and possibly reschedule.
1.541 +
1.542 + @return Nonzero if a reschedule actually occurred, zero if not.
1.543 +
1.544 + @pre Call either in a thread or an IDFC context.
1.545 + @pre Do not call from an ISR.
1.546 + @pre Do not call from bare Win32 threads.
1.547 + */
1.548 +EXPORT_C TInt NKern::PreemptionPoint()
1.549 + {
1.550 + CHECK_PRECONDITIONS(MASK_NOT_ISR,"NKern::PreemptionPoint");
1.551 + __ASSERT_WITH_MESSAGE_DEBUG(IsScheduledThread(),"Do not call from bare Win32 threads","NKern::PreemptionPoint"); // check that we are a scheduled thread
1.552 + if (TheScheduler.iKernCSLocked == 1 &&
1.553 + (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag))
1.554 + {
1.555 + TScheduler::Reschedule();
1.556 + TheScheduler.iKernCSLocked = 1;
1.557 + NKern::EnableAllInterrupts();
1.558 + return TRUE;
1.559 + }
1.560 + return FALSE;
1.561 + }
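+
+// An illustrative usage sketch (not part of this file; DoSomeWork() is hypothetical):
+// a long-running loop executed with the kernel locked can offer a reschedule
+// opportunity on each iteration.
+//
+//     while (DoSomeWork())
+//         NKern::PreemptionPoint();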
1.562 +
1.563 +
1.564 +/** Mark the start of an 'interrupt' in the Win32 emulator.
1.565 + This must be called in interrupt threads before using any other kernel APIs,
1.566 + and should be paired with a call to EndOfInterrupt().
1.567 +
1.568 + @pre Win32 'interrupt' thread context
1.569 + */
1.570 +EXPORT_C void StartOfInterrupt()
1.571 + {
1.572 +	__ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(),"Win32 'interrupt' thread context","StartOfInterrupt");	// check that we are not a scheduled thread
1.573 + Interrupt.Begin();
1.574 + }
1.575 +
1.576 +
1.577 +/** Mark the end of an 'interrupt' in the Win32 emulator.
1.578 + This checks to see if we need to reschedule.
1.579 +
1.580 + @pre Win32 'interrupt' thread context
1.581 + */
1.582 +EXPORT_C void EndOfInterrupt()
1.583 + {
1.584 +	__ASSERT_WITH_MESSAGE_DEBUG(!IsScheduledThread(),"Win32 'interrupt' thread context","EndOfInterrupt");	// check that we are not a scheduled thread
1.585 + Interrupt.End();
1.586 + }
1.587 +
1.588 +
1.589 +void Win32Interrupt::Init()
1.590 + {
1.591 + iQ=CreateSemaphoreA(NULL, 0, KMaxTInt, NULL);
1.592 + __NK_ASSERT_ALWAYS(iQ);
1.593 + //
1.594 + // create the NThread which exists solely to service reschedules for interrupts
1.595 + // this makes the End() much simpler as it merely needs to kick this thread
1.596 + SNThreadCreateInfo ni;
1.597 + memclr(&ni, sizeof(ni));
1.598 + ni.iFunction=&Reschedule;
1.599 + ni.iTimeslice=-1;
1.600 + ni.iPriority=1;
1.601 + NKern::ThreadCreate(&iScheduler, ni);
1.602 + NKern::Lock();
1.603 + TScheduler::YieldTo(&iScheduler);
1.604 + Restore(0);
1.605 + }
1.606 +
1.607 +TInt Win32Interrupt::Mask()
1.608 + {
1.609 + if (!iQ)
1.610 + return 0; // interrupt scheme not enabled yet
1.611 + DWORD id=GetCurrentThreadId();
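+	// the atomic increment returns the previous value: non-zero means the
+	// 'interrupt' lock is already held, possibly by this same thread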
1.612 + if (__e32_atomic_add_ord32(&iLock, 1))
1.613 + {
1.614 + if (id==iOwner)
1.615 + return iLevel++;
1.616 + __NK_ASSERT_ALWAYS(WaitForSingleObject(iQ,INFINITE) == WAIT_OBJECT_0);
1.617 + iRescheduleOnExit=IsScheduledThread() &&
1.618 + (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag);
1.619 + }
1.620 + else
1.621 + iRescheduleOnExit=FALSE;
1.622 + __NK_ASSERT_ALWAYS(iOwner==0 && iLevel==0);
1.623 + iOwner=id;
1.624 + iLevel=1;
1.625 + return 0;
1.626 + }
1.627 +
1.628 +void Win32Interrupt::Restore(TInt aLevel)
1.629 + {
1.630 + if (!iQ)
1.631 + return; // interrupt scheme not enabled yet
1.632 + DWORD id=GetCurrentThreadId();
1.633 + for (;;)
1.634 + {
1.635 + __NK_ASSERT_ALWAYS(id == iOwner);
1.636 + TInt count = iLevel - aLevel;
1.637 + if (count <= 0)
1.638 +			return;	// already restored to that level
1.639 + TBool reschedule = FALSE;
1.640 + iLevel = aLevel; // update this value before releasing the lock
1.641 + if (aLevel == 0)
1.642 + {
1.643 + // we release the lock
1.644 + iOwner = 0;
1.645 + if (iRescheduleOnExit && TheScheduler.iKernCSLocked == 0)
1.646 + reschedule = TRUE; // need to trigger reschedule on full release
1.647 + }
1.648 + // now release the lock
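+		// the atomic add returns the previous value, so a result equal to 'count'
+		// means the lock word has just dropped to zero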
1.649 + if (__e32_atomic_add_ord32(&iLock, TUint32(-count)) == (TUint32)count)
1.650 + { // fully released, check for reschedule
1.651 + if (!reschedule)
1.652 + return;
1.653 + }
1.654 + else
1.655 + { // not fully released
1.656 + if (aLevel == 0)
1.657 + __NK_ASSERT_ALWAYS(ReleaseSemaphore(iQ,1,NULL));
1.658 + return;
1.659 + }
1.660 + // unlocked everything but a reschedule may be required
1.661 + TheScheduler.iKernCSLocked = 1;
1.662 + TScheduler::Reschedule();
1.663 + // return with the kernel unlocked, but interrupts disabled
1.664 + // instead of going recursive with a call to EnableAllInterrupts() we iterate
1.665 + aLevel=0;
1.666 + }
1.667 + }
1.668 +
1.669 +void Win32Interrupt::Begin()
1.670 + {
1.671 + Mask();
1.672 + __NK_ASSERT_ALWAYS(iInterrupted==0); // check we haven't done this already
1.673 + __NK_ASSERT_ALWAYS(!IsScheduledThread()); // check that we aren't a scheduled thread
1.674 + NThread* pC;
1.675 + for (;;)
1.676 + {
1.677 + pC=static_cast<NThread*>(TheScheduler.iCurrentThread);
1.678 + DWORD r=SuspendThread(pC->iWinThread);
1.679 + if (pC == TheScheduler.iCurrentThread)
1.680 + {
1.681 + // there was no race while suspending the thread, so we can carry on
1.682 + __NK_ASSERT_ALWAYS(r != 0xffffffff);
1.683 + break;
1.684 + }
1.685 + // We suspended the thread while doing a context switch, resume it and try again
1.686 + if (r != 0xffffffff)
1.687 + __NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0); // check thread was previously suspended
1.688 + }
1.689 +#ifdef BTRACE_CPU_USAGE
1.690 + BTrace0(BTrace::ECpuUsage,BTrace::EIrqStart);
1.691 +#endif
1.692 + iInterrupted = pC;
1.693 + }
1.694 +
1.695 +void Win32Interrupt::End()
1.696 + {
1.697 + __NK_ASSERT_ALWAYS(iOwner == GetCurrentThreadId()); // check we are the interrupting thread
1.698 + NThread* pC = iInterrupted;
1.699 + __NK_ASSERT_ALWAYS(pC==TheScheduler.iCurrentThread);
1.700 + iInterrupted = 0;
1.701 + if (iLock == 1 && TheScheduler.iKernCSLocked == 0 &&
1.702 + (TheScheduler.iRescheduleNeededFlag || TheScheduler.iDfcPendingFlag) &&
1.703 + pC->IsSafeToPreempt())
1.704 + {
1.705 + TheScheduler.iKernCSLocked = 1; // prevent further pre-emption
1.706 + if (pC->iWakeup == NThread::EIdle)
1.707 + {
1.708 + // wake up the NULL thread, it will always reschedule immediately
1.709 + pC->WakeUp();
1.710 + }
1.711 + else
1.712 + {
1.713 + // pre-empt the current thread and poke the 'scheduler' thread
1.714 + __NK_ASSERT_ALWAYS(pC->iWakeup == NThread::ERelease);
1.715 + pC->iWakeup = NThread::EResume;
1.716 + UpdateThreadCpuTime(*pC, iScheduler);
1.717 + RescheduleNeeded();
1.718 + NKern::EnableAllInterrupts();
1.719 + iScheduler.WakeUp();
1.720 + return;
1.721 + }
1.722 + }
1.723 + else
1.724 + {
1.725 +		// no thread reschedule, so emit trace...
1.726 +#ifdef BTRACE_CPU_USAGE
1.727 + BTrace0(BTrace::ECpuUsage,BTrace::EIrqEnd);
1.728 +#endif
1.729 + }
1.730 +
1.731 + if (((NThread*)pC)->iInKernel == 0 && // thread is running in user mode
1.732 + pC->iUserModeCallbacks != NULL && // and has callbacks queued
1.733 + TheScheduler.iKernCSLocked == 0 && // and is not currently processing a diversion
1.734 +		pC->IsSafeToPreempt())					// and can be safely preempted at this point
1.735 + {
1.736 + TheScheduler.iKernCSLocked = 1;
1.737 + pC->ApplyDiversion();
1.738 + }
1.739 + NKern::EnableAllInterrupts();
1.740 + __NK_ASSERT_ALWAYS(TInt(ResumeThread(pC->iWinThread)) > 0); // check thread was previously suspended
1.741 + }
1.742 +
1.743 +void Win32Interrupt::Reschedule(TAny*)
1.744 +//
1.745 +// The entry-point for the interrupt-rescheduler thread.
1.746 +//
1.747 +// This spends its whole life going around the TScheduler::Reschedule() loop
1.748 +// selecting another thread to run.
1.749 +//
1.750 + {
1.751 + TheScheduler.iKernCSLocked = 1;
1.752 + RescheduleNeeded();
1.753 + TScheduler::Reschedule();
1.754 + FAULT();
1.755 + }
1.756 +
1.757 +void Win32Interrupt::ForceReschedule()
1.758 + {
1.759 + RescheduleNeeded();
1.760 + iScheduler.WakeUp();
1.761 + }
1.762 +
1.763 +void SchedulerEscape()
1.764 + {
1.765 + NThread& me=CheckedCurrentThread();
1.766 + EnterKernel();
1.767 + __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0); // Can't call Escape() with the Emulator/kernel already locked
1.768 + NKern::ThreadEnterCS();
1.769 + NKern::Lock();
1.770 + me.iNState=NThreadBase::EBlocked;
1.771 + TheScheduler.Remove(&me);
1.772 + me.iWakeup=NThread::EEscaped;
1.773 + SetThreadPriority(me.iWinThread,THREAD_PRIORITY_ABOVE_NORMAL);
1.774 + Interrupt.ForceReschedule(); // schedules some other thread so we can carry on outside the scheduler domain
1.775 + // this will change the value of iCurrentThread to ensure the 'escaped' invariants are set
1.776 + }
1.777 +
1.778 +void ReenterDfc(TAny* aPtr)
1.779 + {
1.780 + NThread& me = *static_cast<NThread*>(aPtr);
1.781 + me.iWakeup = NThread::ERelease;
1.782 + me.CheckSuspendThenReady();
1.783 + }
1.784 +
1.785 +void SchedulerReenter()
1.786 + {
1.787 + NThread* me=SchedulerThread();
1.788 + __NK_ASSERT_ALWAYS(me);
1.789 + __NK_ASSERT_ALWAYS(me->iWakeup == NThread::EEscaped);
1.790 + TDfc idfc(&ReenterDfc, me);
1.791 + StartOfInterrupt();
1.792 + idfc.Add();
1.793 + EndOfInterrupt();
1.794 + SetThreadPriority(me->iWinThread,THREAD_PRIORITY_NORMAL);
1.795 + __NK_ASSERT_ALWAYS(WaitForSingleObject(me->iScheduleLock, INFINITE) == WAIT_OBJECT_0);
1.796 + // when released, the kernel is locked and handed over to us
1.797 + // need to complete the reschedule protocol in this thread now
1.798 + TScheduler::Reschedule();
1.799 + NKern::EnableAllInterrupts();
1.800 + NKern::ThreadLeaveCS();
1.801 + LeaveKernel();
1.802 + }
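+
+// An illustrative pairing (not part of this file): escape the scheduler before
+// calling native Win32 services that may block outside nKern's control.
+//
+//     SchedulerEscape();
+//     // ... use native Win32 APIs freely; nKern no longer schedules this thread ...
+//     SchedulerReenter();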
1.803 +
1.804 +
1.805 +/** Return the current processor context type
1.806 + (thread, IDFC, interrupt or escaped thread)
1.807 +
1.808 + @return A value from NKern::TContext enumeration (including EEscaped)
1.809 + @pre Any context
1.810 +
1.811 + @see NKern::TContext
1.812 + */
1.813 +EXPORT_C TInt NKern::CurrentContext()
1.814 + {
1.815 + NThread* t = SchedulerThread();
1.816 + if (!t)
1.817 + return NKern::EInterrupt;
1.818 + if (TheScheduler.iInIDFC)
1.819 + return NKern::EIDFC;
1.820 + if (t->iWakeup == NThread::EEscaped)
1.821 + return NKern::EEscaped;
1.822 + __NK_ASSERT_ALWAYS(NKern::Crashed() || t == TheScheduler.iCurrentThread);
1.823 + return NKern::EThread;
1.824 + }
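+
+// An illustrative usage sketch (not part of this file): guard code that must only
+// run in a scheduled thread context.
+//
+//     __NK_ASSERT_DEBUG(NKern::CurrentContext() == NKern::EThread);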
1.825 +
1.826 +//
1.827 +// We use SuspendThread and ResumeThread to preempt threads. This can cause
1.828 +// deadlock if the thread is using Windows synchronisation primitives (e.g.
1.829 +// critical sections). This isn't too much of a problem most of the time,
1.830 +// because threads generally use the Symbian environment rather than the native
1.831 +// Windows APIs. However, exceptions are an issue - they can happen at any time,
1.832 +// and cause execution of native Windows code over which we have no control.
1.833 +//
1.834 +// To work around this we examine the call stack to see if the thread is inside
1.835 +// one of the Windows exception handling functions. If so, preemption is
1.836 +// deferred.
1.837 +//
1.838 +
1.839 +#include <winnt.h>
1.840 +
1.841 +const TInt KWin32NonPreemptibleFunctionCount = 2;
1.842 +
1.843 +struct TWin32FunctionInfo
1.844 + {
1.845 + TUint iStartAddr;
1.846 + TUint iLength;
1.847 + };
1.848 +
1.849 +static TWin32FunctionInfo Win32NonPreemptibleFunctions[KWin32NonPreemptibleFunctionCount];
1.850 +
1.851 +TWin32FunctionInfo Win32FindExportedFunction(const char* aModuleName, const char* aFunctionName)
1.852 + {
1.853 + HMODULE library = GetModuleHandleA(aModuleName);
1.854 + __NK_ASSERT_ALWAYS(library != NULL);
1.855 +
1.856 + // Find the start address of the function
1.857 + TUint start = (TUint)GetProcAddress(library, aFunctionName);
1.858 + __NK_ASSERT_ALWAYS(start);
1.859 +
1.860 + // Now have to check all other exports to find the end of the function
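+	// (this assumes exported code is contiguous, so the lowest export address
+	// above 'start' marks the end of the function)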
1.861 + TUint end = 0xffffffff;
1.862 + TInt i = 1;
1.863 + for (;;)
1.864 + {
1.865 + TUint addr = (TUint)GetProcAddress(library, MAKEINTRESOURCEA(i));
1.866 + if (!addr)
1.867 + break;
1.868 + if (addr > start && addr < end)
1.869 + end = addr;
1.870 + ++i;
1.871 + }
1.872 + __NK_ASSERT_ALWAYS(end != 0xffffffff);
1.873 +
1.874 + TWin32FunctionInfo result = { start, end - start };
1.875 + return result;
1.876 + }
1.877 +
1.878 +void Win32FindNonPreemptibleFunctions()
1.879 + {
1.880 + Win32NonPreemptibleFunctions[0] = Win32FindExportedFunction("kernel32.dll", "RaiseException");
1.881 + Win32NonPreemptibleFunctions[1] = Win32FindExportedFunction("ntdll.dll", "KiUserExceptionDispatcher");
1.882 + }
1.883 +
1.884 +TBool Win32IsThreadInNonPreemptibleFunction(HANDLE aWinThread, TLinAddr aStackTop)
1.885 + {
1.886 + const TInt KMaxSearchDepth = 16; // 12 max observed while handling exceptions
1.887 + const TInt KMaxStackSize = 1024 * 1024; // Default reserved stack size on windows
1.888 + const TInt KMaxFrameSize = 4096;
1.889 +
1.890 + CONTEXT c;
1.891 + c.ContextFlags=CONTEXT_FULL;
1.892 + GetThreadContext(aWinThread, &c);
1.893 +
1.894 + TUint eip = c.Eip;
1.895 + TUint ebp = c.Ebp;
1.896 + TUint lastEbp = c.Esp;
1.897 +
1.898 + // Walk the call stack
1.899 + for (TInt i = 0 ; i < KMaxSearchDepth ; ++i)
1.900 + {
1.901 + for (TInt j = 0 ; j < KWin32NonPreemptibleFunctionCount ; ++j)
1.902 + {
1.903 + const TWin32FunctionInfo& info = Win32NonPreemptibleFunctions[j];
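+			// unsigned wrap-around folds the range test into one comparison:
+			// true iff iStartAddr <= eip < iStartAddr + iLength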
1.904 + if (TUint(eip - info.iStartAddr) < info.iLength)
1.905 + {
1.906 + __KTRACE_OPT(KSCHED, DEBUGPRINT("Thread is in non-preemptible function %d at frame %d: eip == %08x", j, i, eip));
1.907 + return TRUE;
1.908 + }
1.909 + }
1.910 +
1.911 + // Check frame pointer is valid before dereferencing it
1.912 + if (TUint(aStackTop - ebp) > KMaxStackSize || TUint(ebp - lastEbp) > KMaxFrameSize || ebp & 3)
1.913 + break;
1.914 +
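+		// standard x86 frame layout: [ebp] holds the caller's saved EBP and
+		// [ebp+4] the return address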
1.915 + TUint* frame = (TUint*)ebp;
1.916 + lastEbp = ebp;
1.917 + ebp = frame[0];
1.918 + eip = frame[1];
1.919 + }
1.920 +
1.921 + return FALSE;
1.922 + }
1.923 +
1.924 +TBool NThread::IsSafeToPreempt()
1.925 + {
1.926 + return !Win32IsThreadInNonPreemptibleFunction(iWinThread, iUserStackBase);
1.927 + }
1.928 +
1.929 +void LeaveKernel()
1.930 + {
1.931 + TInt& k=CheckedCurrentThread().iInKernel;
1.932 + __NK_ASSERT_DEBUG(k>0);
1.933 + if (k==1) // just about to leave kernel
1.934 + {
1.935 + NThread& t = CheckedCurrentThread();
1.936 + __NK_ASSERT_ALWAYS(t.iCsCount==0);
1.937 + __NK_ASSERT_ALWAYS(t.iHeldFastMutex==0);
1.938 + __NK_ASSERT_ALWAYS(TheScheduler.iKernCSLocked==0);
1.939 + NKern::DisableAllInterrupts();
1.940 + t.CallUserModeCallbacks();
1.941 + NKern::EnableAllInterrupts();
1.942 + }
1.943 + --k;
1.944 + }
1.945 +