// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\nkernsmp\sched.cpp

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__
#define __INCLUDE_TDFC_DEFINES__

TSpinLock NEventHandler::TiedLock(TSpinLock::EOrderEventHandlerTied);
/******************************************************************************
 ******************************************************************************/

// TScheduler resides in .bss so other fields are zero-initialised
TScheduler::TScheduler()
: iActiveCpus1(1), // only boot CPU for now
iActiveCpus2(1), // only boot CPU for now
iIdleSpinLock(TSpinLock::EOrderIdleDFCList),
iCpusNotIdle(1) // only boot CPU for now
for (i=0; i<KMaxCpus; ++i)
TSubScheduler* s = TheSubSchedulers + i;
s->iCpuNum = TUint32(i);
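// TheSubSchedulers is the per-CPU array of sub-scheduler objects; the
// constructor above stamps each entry with its CPU number, and TScheduler
// exposes them through iSub[]/iNumCpus for the code below (e.g. ReadyT's
// CPU-selection loop).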
/** Return a pointer to the scheduler
Intended for use by the crash debugger, not for general device driver use.
@return Pointer to the scheduler object
*/
EXPORT_C TScheduler* TScheduler::Ptr()
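// Illustrative use (a sketch, not part of the original source): a crash
// debugger can locate the scheduler and walk the per-CPU state, e.g.
//
//	TScheduler* sch = TScheduler::Ptr();
//	for (TInt i=0; i<sch->iNumCpus; ++i)
//		{
//		TSubScheduler* ss = sch->iSub[i];
//		// inspect ss->iCurrentThread, ready lists, ...
//		}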
/******************************************************************************
 ******************************************************************************/

// TSubScheduler resides in .bss so other fields are zero-initialised
TSubScheduler::TSubScheduler()
: TPriListBase(KNumPriorities),
iExIDfcLock(TSpinLock::EOrderExIDfcQ),
iReadyListLock(TSpinLock::EOrderReadyList),
iEventHandlerLock(TSpinLock::EOrderEventHandlerList)
/******************************************************************************
 ******************************************************************************/

void NSchedulable::AcqSLock()
iSSpinLock.LockOnly();
if (iParent!=this && iParent)

void NSchedulable::RelSLock()
if (iParent!=this && iParent)
iSSpinLock.UnlockOnly();
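// AcqSLock() takes the object's own spin lock first and, for a thread that
// currently belongs to a group (iParent set and not equal to this), presumably
// the parent group's lock as well; RelSLock() reverses this, dropping the
// object's own lock last.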
void NSchedulable::LAcqSLock()

void NSchedulable::RelSLockU()
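// A schedulable can be "paused" (iPauseCount > 0) to keep it off the ready
// list temporarily. UnPauseT() decrements the count and can only make the
// object ready again once the count reaches zero and it is not already ready,
// suspended, or (for a thread) blocked in its wait state.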
void NSchedulable::UnPauseT()
CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::UnPauseT");
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nUnPauseT",this));
__NK_ASSERT_DEBUG(iPauseCount);
if (--iPauseCount || iReady || iSuspended || (iParent && ((NThread*)this)->iWaitState.ThreadIsBlocked()))

void NSchedulable::DeferredReadyIDfcFn(TAny* aPtr)
NSchedulable* a = (NSchedulable*)aPtr;
TUint32 evs = __e32_atomic_and_acq32(&a->iEventState, ~EDeferredReady);
if (evs & EDeferredReady)
NThreadGroup* g = (NThreadGroup*)a;
__KTRACE_OPT(KNKERN,DEBUGPRINT("%G nDeferredReady",g));
__NK_ASSERT_DEBUG(g->iPauseCount);
if (--g->iPauseCount && g->iNThreadList.NonEmpty())
TInt NSchedulable::AddTiedEvent(NEventHandler* aEvent)
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T AddEv %08x",this,aEvent));
TInt r = KErrGeneral;
NEventHandler::TiedLock.LockOnly();
else if (!aEvent->iTied)
aEvent->iTied = this;
iEvents.Add(&aEvent->iTiedLink);
NEventHandler::TiedLock.UnlockOnly();
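// AddTiedEvent() ties an event handler (IDFC, timer or interrupt handler) to
// this thread/group: under NEventHandler::TiedLock it records the back pointer
// in aEvent->iTied and queues the handler on iEvents so that
// DetachTiedEvents() can find and cancel it later. A handler that is already
// tied is rejected (the default KErrGeneral result stands).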
void ipi_dummy(TGenericIPI*)

/** Detach and cancel any tied events attached to this thread/group
Call in a thread context with interrupts and preemption enabled.
Calling thread in critical section.
*/
void NSchedulable::DetachTiedEvents()
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T DetTiedEv",this));
NEventHandler::TiedLock.LockOnly();
// can't destroy a group until all threads have detached from it
NThreadGroup* g = (NThreadGroup*)this;
__NK_ASSERT_ALWAYS(g->iThreadCount==0 && g->iNThreadList.IsEmpty());
NEventHandler::TiedLock.UnlockOnly();
// send IPI to all processors to synchronise
// after this, any tied IDFCs can only proceed to completion
// they can't be queued again
ipi.QueueAllOther(&ipi_dummy);
ipi.WaitCompletion();
NEventHandler::TiedLock.LockOnly();
NEventHandler* h = 0;
if (!iEvents.IsEmpty())
h = _LOFF(iEvents.First()->Deque(), NEventHandler, iTiedLink);
h->iTiedLink.iNext = 0;
if (type == NEventHandler::EEventHandlerNTimer)
// everything's easy for a timer since we can just cancel it here
NTimer* tmr = (NTimer*)h;
tmr->DoCancel(NTimer::ECancelDestroy);
else if (type == NEventHandler::EEventHandlerIDFC)
// can just cancel the IDFC with TiedLock held
// EndTiedEvent() may be delayed, but we wait for that further down
// iTied will have been captured before the IDFC state is reset
// Cancel() waits for the state to be reset
d->iHType = (TUint8)NEventHandler::EEventHandlerDummy;
NEventHandler::TiedLock.UnlockOnly();
case NEventHandler::EEventHandlerIrq:
NIrqHandler* pH = (NIrqHandler*)h;
// pH can't have been freed since we dequeued it but left iTied set
pH->Unbind(pH->iHandle, this);
case NEventHandler::EEventHandlerNTimer:
case NEventHandler::EEventHandlerIDFC:
case NEventHandler::EEventHandlerDummy:
// nothing left to do
__NK_ASSERT_ALWAYS(0);
// Wait for any remaining tied event handlers to complete
while (iEventState & EEventCountMask)
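// Detaching happens in phases: a dummy IPI is broadcast so that any tied IDFC
// already running can only run to completion and cannot be requeued, each
// handler is then dequeued from iEvents under TiedLock and cancelled according
// to its type (NTimer, IDFC, interrupt handler), and the loop above finally
// waits for any handlers still in flight (iEventState & EEventCountMask) to
// complete.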
/******************************************************************************
 ******************************************************************************/

/******************************************************************************
 ******************************************************************************/

/** Makes a nanothread ready.
For use by RTOS personality layers.
@pre Kernel must be locked.
@pre Call either in a thread or an IDFC context.
@pre The thread being made ready must not be explicitly suspended
@post Kernel is locked.
*/
void NSchedulable::ReadyT(TUint aMode)
CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::ReadyT");
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nReadyT(%x)",this,aMode));
NThreadBase* t = (NThreadBase*)this;
t = (NThreadBase*)0xface0fff;
__NK_ASSERT_DEBUG(!iReady && (!iParent || (!t->iWaitState.iWtC.iWtStFlags && !t->iPauseCount && !t->iSuspended)));
TSubScheduler& ss0 = SubScheduler();
NSchedulable* g = this;
if (iParent != this && iParent)
NThreadGroup* tg = (NThreadGroup*)iParent;
iReady = EReadyGroup;
// extra thread added to group - change priority if necessary
tg->iNThreadList.Add(this);
TInt gp = tg->iPriority;
TSubScheduler& ss = TheSubSchedulers[tg->iReady & EReadyCpuMask];
ss.iReadyListLock.LockOnly();
TInt hp = ss.HighestPriority();
ss.ChangePriority(tg, iPriority);
if (iPriority>hp || (iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
RescheduleNeeded(); // reschedule on this processor
ss0.iReschedIPIs |= ss.iCpuMask; // will kick the other CPU when this CPU reenables preemption
if ((aMode & ENewTimeslice) && t->iTime==0 && (iNext!=this || ss.iQueue[iPriority]))
t->iTime = t->iTimeslice;
ss.iReadyListLock.UnlockOnly();
tg->iNThreadList.Add(this);
tg->iPriority = iPriority; // first in group
g = tg; // fall through to add group to subscheduler
if (aMode & EUnPause)
cpu = (g->iEventState & EThreadCpuMask)>>EThreadCpuShift;
if (CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
else if (g->iFreezeCpu)
if (!CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
g->iCpuChange = TRUE;
else if (!(g->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
cpu = g->iCpuAffinity;
else if ((aMode & EPreferSameCpu) && (g->iCpuAffinity & ss0.iCpuMask))
TScheduler& s = TheScheduler;
TUint32 m = g->iCpuAffinity & s.iActiveCpus1;
TInt lowest_p = KMaxTInt;
for (i=0; i<s.iNumCpus; ++i)
TSubScheduler& ss = *s.iSub[i];
if (!(m & ss.iCpuMask))
TInt hp = ss.HighestPriority();
if (cpu>=0 && g->iLastCpu!=i)
__NK_ASSERT_ALWAYS(cpu>=0);
if (g->TiedEventReadyInterlock(cpu))
__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %dD",cpu));
// ((TDfc*)g->i_IDfcMem)->Add();
__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %d",cpu));
TSubScheduler& ss = TheSubSchedulers[cpu];
ss.iReadyListLock.LockOnly();
TInt hp = ss.HighestPriority();
if (g->iPriority>hp || (g->iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
RescheduleNeeded(); // reschedule on this processor
ss0.iReschedIPIs |= ss.iCpuMask; // will kick the other CPU when this CPU reenables preemption
g->iReady = TUint8(cpu | EReadyOffset);
if ((aMode & ENewTimeslice) && iParent && t->iTime==0 && g->iNext!=g)
t->iTime = t->iTimeslice;
ss.iReadyListLock.UnlockOnly();
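// Illustrative use from an RTOS personality layer (a sketch, not part of the
// original source), respecting the preconditions documented above:
//
//	NKern::Lock();
//	pThread->ReadyT(0);	// or e.g. NThreadBase::ENewTimeslice
//	NKern::Unlock();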
NThread* TSubScheduler::SelectNextThread()
NThread* ot = iCurrentThread;
TBool migrate = FALSE;
TBool gmigrate = FALSE;
TBool fmd_done = FALSE;
TBool fmd_res = FALSE;
iReadyListLock.LockOnly();
iRescheduleNeededFlag = FALSE;
ot->iNewParent->AcqSLock();
SaveTimesliceTimer(ot); // remember how much of current thread's timeslice remains
if (ot->iCsFunction==NThreadBase::ECSDivertPending && ot->iWaitState.iWtC.iWtStFlags)
// thread about to exit so cancel outstanding wait
ot->DoReleaseT(KErrDied,0);
if (ot->iWaitState.iWtC.iWtStFlags==0)
// ASSUMPTION: If iNewParent set, ot can't hold a fast mutex (assertion in JoinGroup)
TBool pfmd = (ot->iParent!=ot && !ot->iFastMutexDefer);
if (ot->iTime==0 || pfmd)
// ot's timeslice has expired
fmd_res = ot->CheckFastMutexDefer();
ot->iTime = 0x80000000; // mark deferred timeslice expiry
ot->iFastMutexDefer = 1;
++ot->iParent->iFreezeCpu;
iReadyListLock.LockOnly();
iRescheduleNeededFlag = FALSE;
// process outstanding suspend/kill/CPU change on ot
__NK_ASSERT_DEBUG(!(ot->iWaitState.iWtC.iWtStFlags & NThreadWaitState::EWtStWaitActive));
if (ot->iWaitState.iWtC.iWtStFlags || ot->iPauseCount || ot->iSuspended)
// ot is no longer ready to run
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T WS: %02x %02x (%08x) P:%02x S:%1x", ot,
ot->iWaitState.iWtC.iWtStFlags, ot->iWaitState.iWtC.iWtObjType, ot->iWaitState.iWtC.iWtObj, ot->iPauseCount, ot->iSuspended));
TInt wtst = ot->iWaitState.DoWait();
if (wtst>=0 && wtst!=NThread::EWaitFastMutex)
ot->iTime = ot->iTimeslice;
ot->iParent = ot->iNewParent, ++((NThreadGroup*)ot->iParent)->iThreadCount;
wmb(); // must make sure iParent is updated before iNewParent is cleared
ot->iCpuChange = FALSE;
else if (ot->iNewParent)
__NK_ASSERT_ALWAYS(ot->iParent==ot && !ot->iHeldFastMutex && !ot->iFreezeCpu);
ot->iParent = ot->iNewParent;
ot->iCpuChange = FALSE;
++((NThreadGroup*)ot->iParent)->iThreadCount;
wmb(); // must make sure iParent is updated before iNewParent is cleared
else if (ot->iParent->iCpuChange && !ot->iParent->iFreezeCpu)
if (!CheckCpuAgainstAffinity(iCpuNum, ot->iParent->iCpuAffinity))
fmd_res = ot->CheckFastMutexDefer(), fmd_done = TRUE;
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T A:%08x",ot,ot->iParent->iCpuAffinity));
ot->iCpuChange = FALSE;
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T GA:%08x",ot,ot->iParent->iCpuAffinity));
ot->iParent->iReady = 0;
ot->iCpuChange = FALSE;
ot->iParent->iCpuChange = FALSE;
ot->iCpuChange = FALSE;
ot->iParent->iCpuChange = FALSE;
NSchedulable* g = (NSchedulable*)First();
if (g && g->IsGroup())
t = (NThread*)((NThreadGroup*)g)->iNThreadList.First();
TBool rrct = (t && t->iNext!=t);
if (t && t->iTime==0 && (rrcg || rrct))
// candidate thread's timeslice has expired and there is another at the same priority
((NThreadGroup*)ot->iParent)->iNThreadList.iQueue[ot->iPriority] = ot->iNext;
iQueue[ot->iParent->iPriority] = ot->iParent->iNext;
iQueue[ot->iPriority] = ot->iNext;
ot->iTime = ot->iTimeslice;
NSchedulable* g2 = (NSchedulable*)First();
t = (NThread*)((NThreadGroup*)g2)->iNThreadList.First();
// loop again since we need to lock t before round robining it
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RRL",ot));
iRescheduleNeededFlag = TRUE;
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RR",ot));
/* if (ot->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
ot->iTime = ot->iTimeslice;
else // loop again since we need to lock t before round robining it
__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T LL",ot));
iRescheduleNeededFlag = TRUE;
ot->iParent->iCurrent = 0;
ot->CompleteContextSave();
t->iLastCpu = iCpuNum;
t->iParent->iLastCpu = iCpuNum;
t->iCurrent = TUint8(iCpuNum | NSchedulable::EReadyOffset);
t->iParent->iCurrent = t->iCurrent;
UpdateThreadTimes(ot,t); // update ot's run time and set up the timeslice timer for t
iReadyListLock.UnlockOnly();
ot->ReadyT(NThreadBase::ENewTimeslice); // new timeslice if it's queued behind another thread at same priority
ot->iParent->ReadyT(0); // new timeslice if it's queued behind another thread at same priority
// DFC to signal thread is now dead
if (ot->iWaitState.ThreadIsDead() && ot->iWaitState.iWtC.iKillDfc)
ot->iWaitState.iWtC.iKillDfc->DoEnque();
__KTRACE_OPT(KSCHED,DEBUGPRINT("Rschd->%T",t));
__NK_ASSERT_ALWAYS(!t || t->iParent); // must be a thread not a group
return t; // could return NULL
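// SelectNextThread() picks the highest-priority ready schedulable on this CPU,
// resolving a thread group to its first ready thread, applies round-robin when
// a timeslice has expired, and processes any pending wait/suspend, group-join
// or CPU-migration work on the outgoing thread before returning the thread to
// run next (which may be NULL).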
void NThreadBase::UnReadyT()
NThreadGroup& g = *(NThreadGroup*)iParent;
TPriListBase& l = g.iNThreadList;
TSubScheduler& ss = TheSubSchedulers[g.iReady & EReadyCpuMask];
// __KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT (G=%G-)",this,&g));
// __KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT (G=%G)",this,&g));
ss.ChangePriority(&g, l.HighestPriority());
// __KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT",this));
TheSubSchedulers[iReady & EReadyCpuMask].Remove(this);

void NThreadBase::ChangeReadyThreadPriority()
TInt newp = iMutexPri>iBasePri ? iMutexPri : iBasePri;
TInt oldp = iPriority;
TSubScheduler* ss0 = &SubScheduler();
TSubScheduler* ss = 0;
ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
ss->iReadyListLock.LockOnly();
TBool resched = FALSE;
NSchedulable* g = iParent;
NThreadGroup* tg = (NThreadGroup*)g;
tg->iNThreadList.ChangePriority(this, newp);
TInt ngp = tg->iNThreadList.HighestPriority();
if (ngp!=tg->iPriority)
ss->ChangePriority(tg, ngp);
ss->ChangePriority(this, newp);
if (iCurrent) // can't be current if parent not ready
TInt nhp = ss->HighestPriority();
if (newp<oldp && (newp<nhp || (newp==nhp && iTime==0)))
NThreadBase* ct = ss->iCurrentThread;
TInt cp = ct ? ct->iPriority : -1;
if (newp>cp || (newp==cp && ct->iTime==0))
ss0->iReschedIPIs |= ss->iCpuMask; // will kick the other CPU when this CPU reenables preemption
ss->iReadyListLock.UnlockOnly();
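// ChangeReadyThreadPriority() repositions an already-ready thread (and, when it
// belongs to a group, the group itself) on the owning sub-scheduler's list at
// the new effective priority, then requests a reschedule either on this CPU or,
// via the iReschedIPIs mask, on the CPU that currently owns the thread.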
/** Changes the priority of a nanokernel thread.
For use by RTOS personality layers.
Do not use this function directly on a Symbian OS thread.
The thread's unknown state handler will be invoked with function EChangePriority
and parameter newp if the current NState is not recognised and the new priority
is not equal to the original priority.
@param newp The new nanokernel priority (0 <= newp < KNumPriorities).
@pre Kernel must be locked.
@pre Call in a thread context.
@post Kernel is locked.
*/
EXPORT_C void NThreadBase::SetPriority(TInt newp)
CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetPri %d(%d)->%d(%d)",this,iPriority,iBasePri,newp,iMutexPri));
iBasePri = TUint8(newp);
if (iMutexPri > iBasePri)
TInt oldp = iPriority;
if (iLinkedObj && iLinkedObjType==EWaitFastMutex)
wfm = (NFastMutex*)iLinkedObj;
// if thread is attached to/waiting on a fast mutex, need to acquire mutex lock
wfm->iMutexLock.LockOnly();
wfm->iWaitQ.ChangePriority(&iWaitLink, newp); // change position of this thread on mutex wait queue
ChangeReadyThreadPriority();
if (wfm && newp<=wfm->iWaitQ.HighestPriority())
// this thread was contending for the mutex but there may be other waiting threads
// with higher or equal priority, so wake up the first thread on the list.
NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);
// if thread is still blocked on this fast mutex, release it but leave it on the wait queue
// NOTE: it can't be suspended
pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
iPriority = (TUint8)newp;
if (wfm && newp>oldp)
NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink); // highest priority waiting thread
// this is now highest priority waiting thread so wake it up
iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
NThreadBase* t = (NThreadBase*)(TLinAddr(wfm->iHoldingThread)&~3);
t->SetMutexPriority(wfm);
wfm->iMutexLock.UnlockOnly();
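// Illustrative use by an RTOS personality layer (a sketch, not part of the
// original source), following the documented preconditions:
//
//	NKern::Lock();
//	pThread->SetPriority(newp);	// 0 <= newp < KNumPriorities
//	NKern::Unlock();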
/** Set the inherited priority of a nanokernel thread.
@pre Kernel must be locked.
@pre Call in a thread context.
@pre The thread holds a fast mutex
@post Kernel is locked.
*/
void NThreadBase::SetMutexPriority(NFastMutex* aM)
TInt newp = aM->iWaitQ.HighestPriority();
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetMPri %d->%d Base %d (mutex %08x)",this,iMutexPri,newp,iBasePri,aM));
iMutexPri = TUint8(newp);
if (iMutexPri < iBasePri)
TInt oldp = iPriority;
ChangeReadyThreadPriority();
iPriority = (TUint8)newp;
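// SetMutexPriority() is the priority-inheritance step for fast mutexes: the
// holding thread's iMutexPri is set to the highest priority on the mutex wait
// queue, and its effective priority is the larger of iMutexPri and iBasePri
// (see ChangeReadyThreadPriority above). LoseInheritedPriorityT() below drops
// the thread back towards iBasePri when the inherited priority is given up.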
void NThreadBase::LoseInheritedPriorityT()
__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nLoseInhPri %d->%d",this,iPriority,iBasePri));
TSubScheduler* ss = &SubScheduler();
TInt newp = iBasePri;
NSchedulable* g = iParent;
ss->iReadyListLock.LockOnly();
NThreadGroup* tg = (NThreadGroup*)g;
tg->iNThreadList.ChangePriority(this, newp);
TInt hp = tg->iNThreadList.HighestPriority();
if (hp == tg->iPriority)
if (newp <= ss->HighestPriority())
ss->ChangePriority(g, newp);
ss->iReadyListLock.UnlockOnly();