// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Description:
// e32\nkernsmp\nkern.cpp
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include "nk_priv.h"

/******************************************************************************
 * Fast mutexes
 ******************************************************************************/
/** Acquires the fast mutex.

    This will block until the mutex is available, and causes
    the thread to enter an implicit critical section until the mutex is released.

    Generally threads would use NKern::FMWait() which manipulates the kernel lock
    for you.

    @pre Kernel must be locked, with lock count 1.
    @pre The calling thread holds no fast mutexes.

    @post Kernel is locked, with lock count 1.
    @post The calling thread holds the mutex.

    @see NFastMutex::Signal()
    @see NKern::FMWait()
*/
EXPORT_C void NFastMutex::Wait()
    {
    NThreadBase* pC = NCurrentThreadL();
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait");

    pC->iHeldFastMutex = this;  // set early, to handle kill/suspend between here and acquiring the mutex
    // ...
    }
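
// Usage sketch (illustrative, not part of the original source): the raw
// Wait()/Signal() protocol that NKern::FMWait()/NKern::FMSignal() wrap for
// you. The mutex 'm' is a hypothetical NFastMutex instance.
//
//  NFastMutex m;
//  NKern::Lock();      // kernel locked, lock count 1, no fast mutex held
//  m.Wait();           // implicit critical section starts here
//  // ... touch the state m guards ...
//  m.Signal();
//  NKern::Unlock();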

void NFastMutex::DoWaitL()
    {
    NThreadBase* pC = NCurrentThreadL();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T FMWait %M",pC,this));
    TBool waited = FALSE;       // set if we actually had to wait
    iMutexLock.LockOnly();      // acquire mutex spin lock
    __e32_atomic_ior_rlx_ptr(&iHoldingThread, 1);   // set contention flag to make sure any other thread must acquire the mutex spin lock
    // ...
    if (pC->iFastMutexDefer == 1)
        --pC->iParent->iFreezeCpu;
    pC->iFastMutexDefer = 0;
    NThreadBase* pH = (NThreadBase*)(TLinAddr(iHoldingThread) &~ 1);
    // ...
    TInt wp = iWaitQ.HighestPriority();     // -1 if no other thread wants the mutex

    // don't grab the mutex if we have been suspended/killed/migrated by the previous holding thread
    if (!pC->iSuspended && pC->iCsFunction!=NThreadBase::ECSDivertPending && (!pC->iParent->iCpuChange || pC->iParent->iFreezeCpu))
        {
        TInt p = pC->iPriority;
        if (p>wp || (p==wp && waited))
            {
            // if we are the highest priority waiting thread, or equal priority and we have waited, then grab the mutex
            // don't just grab it if we are equal priority and someone else was already waiting
            // set contention flag if other threads are waiting or if the current thread has a round robin outstanding
            pC->iMutexPri = (TUint8)(wp>=0 ? wp : 0);   // pC's actual priority doesn't change since p>=wp
            iHoldingThread = (wp>=0 || TUint32(pC->iTime)==0x80000000u) ? (NThreadBase*)(TLinAddr(pC)|1) : pC;
            __KTRACE_OPT(KNKERN,DEBUGPRINT("%T got mutex %M CF=%d WP=%d",TLinAddr(iHoldingThread)&~1,this,TLinAddr(iHoldingThread)&1,wp));
            // ...
            iMutexLock.UnlockOnly();
#ifdef BTRACE_FAST_MUTEX
            BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, this);
#endif
            return;
            }
        }
    pC->iFastMutexDefer = 2;    // signal to scheduler to allow ctxsw without incrementing iParent->iFreezeCpu
    if (!pC->iSuspended && pC->iCsFunction!=NThreadBase::ECSDivertPending && (!pC->iParent->iCpuChange || pC->iParent->iFreezeCpu))
        {
        // this forces priority changes to wait for the mutex lock
        pC->iLinkedObjType = NThreadBase::EWaitFastMutex;
        pC->iLinkedObj = this;
        pC->iWaitState.SetUpWait(NThreadBase::EWaitFastMutex, NThreadWaitState::EWtStObstructed, this);
        pC->iWaitLink.iPriority = pC->iPriority;
        iWaitQ.Add(&pC->iWaitLink);
        // ...
        pH->SetMutexPriority(this);
        // ...
        iMutexLock.UnlockOnly();
#ifdef BTRACE_FAST_MUTEX
        BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexBlock, this);
#endif
        NKern::PreemptionPoint();   // we block here until the mutex is released and we are 'nominated' for it, or we are suspended/killed
        iMutexLock.LockOnly();
        // ...
        if (pC->iPauseCount || pC->iSuspended || pC->iCsFunction==NThreadBase::ECSDivertPending || (pC->iParent->iCpuChange && !pC->iParent->iFreezeCpu))
            goto do_pause;  // let pause/suspend/kill take effect
        // if the thread was suspended it will have been removed from the wait queue
        if (!pC->iWaitLink.iNext)
            goto thread_suspended;
        iWaitQ.Remove(&pC->iWaitLink);  // take ourselves off the wait/contend queue while we try to grab the mutex
        pC->iWaitLink.iNext = 0;
        pC->iLinkedObj = 0;
        pC->iLinkedObjType = NThreadBase::EWaitNone;
        waited = TRUE;
        // if we are suspended or killed, we loop round again and do the 'else' clause next time
        // ...
        }
do_pause:
    if (pC->iSuspended || pC->iCsFunction==NThreadBase::ECSDivertPending)
        {
        // wake up the next thread to take this one's place
        if (!pH && !iWaitQ.IsEmpty())
            {
            NThreadBase* pT = _LOFF(iWaitQ.First(), NThreadBase, iWaitLink);
            // if the thread is still blocked on this fast mutex, release it but leave it on the wait queue
            // NOTE: it can't be suspended
            pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, this, KErrNone);
            }
thread_suspended:
        iMutexLock.UnlockOnly();
        NKern::PreemptionPoint();   // thread suspends/dies/migrates here
        iMutexLock.LockOnly();
        // ...
        // set the contention flag again to make sure any other thread must acquire the mutex spin lock;
        // this must be done again since the mutex may have been released while this thread was suspended
        __e32_atomic_ior_rlx_ptr(&iHoldingThread, 1);
        // ...
        }
    // ...
    }
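
// Illustrative decode of the iHoldingThread encoding used above (derived from
// the code in this file, not an addition to the algorithm): bit 0 is a
// contention tag, so a releaser that sees it set must take iMutexLock.
//
//  NThreadBase* holder = (NThreadBase*)(TLinAddr(iHoldingThread) &~ 1);   // owner pointer
//  TBool contended = TLinAddr(iHoldingThread) & 1;                        // contention/round-robin flag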

#ifndef __FAST_MUTEX_MACHINE_CODED__
/** Releases a previously acquired fast mutex.

    Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
    for you.

    @pre The calling thread holds the mutex.
    @pre Kernel must be locked.

    @post Kernel is locked.

    @see NFastMutex::Wait()
    @see NKern::FMSignal()
*/
EXPORT_C void NFastMutex::Signal()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal");
#ifdef BTRACE_FAST_MUTEX
    BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
#endif
    NThreadBase* pC = NCurrentThreadL();
    ((volatile TUint32&)pC->iHeldFastMutex) |= 1;   // flag to indicate we are about to release the mutex

    if (__e32_atomic_cas_rel_ptr(&iHoldingThread, &pC, 0))
        {
        // tricky if suspend/kill here:
        // suspend/kill should check the flag set above and iHoldingThread;
        // if bit 0 of iHeldFastMutex is set and iHoldingThread==pC, then set iHeldFastMutex=0 and proceed

        // no-one else was waiting for the mutex - simple
        pC->iHeldFastMutex = 0;
        return;
        }
    // there was contention so do it the hard way
    DoSignalL();
    }

void NFastMutex::DoSignalL()
    {
    NThreadBase* pC = NCurrentThreadL();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T FMSignal %M",pC,this));
    __ASSERT_WITH_MESSAGE_DEBUG(HeldByCurrentThread(),"The calling thread holds the mutex","NFastMutex::Signal");

    iMutexLock.LockOnly();
    if (!iWaitQ.IsEmpty())
        {
        NThreadBase* pT = _LOFF(iWaitQ.First(), NThreadBase, iWaitLink);
        // ...
        // if the thread is still blocked on this fast mutex, release it but leave it on the wait queue
        // NOTE: it can't be suspended
        pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, this, KErrNone);

        iHoldingThread = (NThreadBase*)1;   // mark mutex as released but contended
        }
    else
        iHoldingThread = 0;                 // mark mutex as released and uncontended
    __KTRACE_OPT(KNKERN,DEBUGPRINT("SiHT=%d",iHoldingThread));
    pC->iHeldFastMutex = 0;
    iMutexLock.UnlockOnly();
    // ...
    if (pC->iPriority != pC->iBasePri)
        {
        // lose any inherited priority
        pC->LoseInheritedPriorityT();
        }
    if (TUint32(pC->iTime)==0x80000000u)
        {
        RescheduleNeeded();     // handle deferred timeslicing
        __KTRACE_OPT(KNKERN,DEBUGPRINT("DTS %T",pC));
        }
    if (pC->iFastMutexDefer)
        {
        pC->iFastMutexDefer = 0;
        --pC->iParent->iFreezeCpu;
        }
    if (pC->iParent->iCpuChange && !pC->iParent->iFreezeCpu)
        RescheduleNeeded();     // need to migrate to another CPU
    if (!pC->iCsCount && pC->iCsFunction)
        {
        // ...
        }
    }
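
// Priority inheritance in brief (an illustrative reading of the code above,
// not original source): while waiters exist, the holder runs boosted to the
// highest waiter's priority (iMutexPri), and the boost is shed on release
// via LoseInheritedPriorityT(). Conceptually:
//
//  TInt EffectivePriority(NThreadBase* aT)
//      {
//      return aT->iMutexPri > aT->iBasePri ? aT->iMutexPri : aT->iBasePri;
//      }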

/** Checks if the current thread holds this fast mutex.

    @return TRUE if the current thread holds this fast mutex.

    @pre Call in a thread context.
*/
EXPORT_C TBool NFastMutex::HeldByCurrentThread()
    {
    return (TLinAddr(iHoldingThread)&~1) == (TLinAddr)NKern::CurrentThread();
    }

/** Returns the fast mutex held by the calling thread, if any.

    @return If the calling thread currently holds a fast mutex, this function
            returns a pointer to it; otherwise it returns NULL.
    @pre Call in a thread context.
*/
EXPORT_C NFastMutex* NKern::HeldFastMutex()
    {
    NThreadBase* t = NKern::CurrentThread();
    NFastMutex* m = (NFastMutex*)(TLinAddr(t->iHeldFastMutex)&~3);
    return (m && m->HeldByCurrentThread()) ? m : 0;
    }

#ifndef __FAST_MUTEX_MACHINE_CODED__
/** Acquires a fast mutex.

    This will block until the mutex is available, and causes
    the thread to enter an implicit critical section until the mutex is released.

    @param aMutex The fast mutex to acquire.

    @post The calling thread holds the mutex.

    @see NFastMutex::Wait()
    @see NKern::FMSignal()

    @pre No fast mutex can be held.
    @pre Call in a thread context.
    @pre Kernel must be unlocked.
    @pre Interrupts must be enabled.
*/
EXPORT_C void NKern::FMWait(NFastMutex* aMutex)
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFMW %M", aMutex));
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait");
    NThreadBase* pC = NKern::CurrentThread();

    // If the reschedule IPI from an external suspend or kill occurs after this
    // point, the initiating CPU must observe the write to iHeldFastMutex before
    // the cas operation.
    pC->iHeldFastMutex = aMutex;    // kill/suspend after this point should set the mutex contention flag
    NThreadBase* expect = 0;
    if (__e32_atomic_cas_acq_ptr(&aMutex->iHoldingThread, &expect, pC))
        {
        // mutex was free and we have just claimed it - simple
#ifdef BTRACE_FAST_MUTEX
        BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, aMutex);
#endif
        return;
        }

    // care required if suspend/kill here

    // there is contention so do it the hard way
    // ...
    }
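
// Usage sketch (illustrative, not part of the original source): guarding
// driver state with a fast mutex from thread context. 'TheLock' and
// 'UpdateSharedState()' are hypothetical names.
//
//  static NFastMutex TheLock;
//
//  void UpdateSharedState()
//      {
//      NKern::FMWait(&TheLock);    // blocks until available; implicit critical section
//      // ... modify the state TheLock guards; do not block while holding it ...
//      NKern::FMSignal(&TheLock);  // deferred suspend/kill take effect after this
//      }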

/** Releases a previously acquired fast mutex.

    @param aMutex The fast mutex to release.

    @pre The calling thread holds the mutex.

    @see NFastMutex::Signal()
    @see NKern::FMWait()
*/
EXPORT_C void NKern::FMSignal(NFastMutex* aMutex)
    {
    NThreadBase* pC = NKern::CurrentThread();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFMS %M", aMutex));
#ifdef BTRACE_FAST_MUTEX
    BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, aMutex);
#endif
    ((volatile TUint32&)pC->iHeldFastMutex) |= 1;   // flag to indicate we are about to release the mutex

    if (__e32_atomic_cas_rel_ptr(&aMutex->iHoldingThread, &pC, 0))
        {
        // no-one else was waiting for the mutex and we have just released it

        // tricky if suspend/kill here:
        // suspend/kill should check the flag set above and aMutex->iHoldingThread;
        // if bit 0 of iHeldFastMutex is set and iHoldingThread==pC, then set iHeldFastMutex=0 and proceed

        // If the reschedule IPI from an external suspend or kill occurs after this
        // point, the initiating CPU must observe the write to iHeldFastMutex after
        // the cas operation.
        pC->iHeldFastMutex = 0;
        return;
        }
    // there was contention so do it the hard way
    // ...
    }

/** Acquires the System Lock.

    This will block until the mutex is available, and causes
    the thread to enter an implicit critical section until the mutex is released.

    @post System lock is held.

    @see NKern::UnlockSystem()

    @pre No fast mutex can be held.
    @pre Call in a thread context.
    @pre Kernel must be unlocked.
    @pre Interrupts must be enabled.
*/
EXPORT_C void NKern::LockSystem()
    {
    NKern::FMWait(&TheScheduler.iLock);
    }


/** Releases the System Lock.

    @pre System lock must be held.

    @see NKern::LockSystem()
    @see NKern::FMSignal()
*/
EXPORT_C void NKern::UnlockSystem()
    {
    NKern::FMSignal(&TheScheduler.iLock);
    }

/** Temporarily releases a fast mutex if there is contention.

    If there is another thread attempting to acquire the mutex, the calling
    thread releases the mutex and then acquires it again.

    This is more efficient than the equivalent code:

    @code
    NKern::FMSignal(aM);
    NKern::FMWait(aM);
    @endcode

    @return TRUE if the mutex was relinquished, FALSE if not.

    @pre The mutex must be held.

    @post The mutex is held.
*/
EXPORT_C TBool NKern::FMFlash(NFastMutex* aM)
    {
    NThreadBase* pC = NKern::CurrentThread();
    __ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash");
    TBool w = (pC->iMutexPri >= pC->iBasePri);  // a thread of greater or equal priority is waiting
    if (w)
        {
        NKern::FMSignal(aM);
        NKern::PreemptionPoint();
        NKern::FMWait(aM);
        }
#ifdef BTRACE_FAST_MUTEX
    else
        {
        BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexFlash, aM);
        }
#endif
    return w;
    }
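
// Usage sketch (illustrative): a long-running operation holding a fast mutex
// can 'flash' it periodically so higher priority contenders get a turn.
// 'TheLock', 'count' and 'DoOneStep()' are hypothetical.
//
//  NKern::FMWait(&TheLock);
//  for (TInt i=0; i<count; ++i)
//      {
//      DoOneStep(i);               // work done while holding TheLock
//      NKern::FMFlash(&TheLock);   // release briefly if someone is waiting
//      }
//  NKern::FMSignal(&TheLock);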

/** Temporarily releases the System Lock if there is contention.

    If there is another thread attempting to acquire the System lock, the calling
    thread releases the mutex and then acquires it again.

    This is more efficient than the equivalent code:

    @code
    NKern::UnlockSystem();
    NKern::LockSystem();
    @endcode

    Note that this can only allow higher priority threads to use the System
    lock, as lower priority threads cannot cause contention on a fast mutex.

    @return TRUE if the system lock was relinquished, FALSE if not.

    @pre System lock must be held.

    @post System lock is held.

    @see NKern::LockSystem()
    @see NKern::UnlockSystem()
*/
EXPORT_C TBool NKern::FlashSystem()
    {
    CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"NKern::FlashSystem");
    return NKern::FMFlash(&TheScheduler.iLock);
    }


/******************************************************************************
 * Fast semaphores
 ******************************************************************************/

/** Sets the owner of a fast semaphore.

    @param aThread The thread to own this semaphore. If aThread==0, then the
                   owner is set to the current thread.

    @pre Kernel must be locked.
    @pre If changing ownership from one thread to another, there must be no
         pending signals or waits.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner");
    if (!aThread)
        aThread = NCurrentThreadL();
    if (iOwningThread && iOwningThread!=aThread)
        __NK_ASSERT_ALWAYS(!iCount);    // can't change owner if iCount!=0
    iOwningThread = aThread;
    }

#ifndef __FAST_SEM_MACHINE_CODED__
/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore and
    removes the calling thread from the ready-list if the semaphore becomes
    unsignalled. Only the thread that owns a fast semaphore can wait on it.

    Note that this function does not block, it merely updates the NThread state;
    rescheduling will only occur when the kernel is unlocked. Generally threads
    would use NKern::FSWait() which manipulates the kernel lock for you.

    @pre The calling thread must own the semaphore.
    @pre No fast mutex can be held.
    @pre Kernel must be locked.

    @post Kernel is locked.

    @see NFastSemaphore::Signal()
    @see NKern::FSWait()
*/
EXPORT_C void NFastSemaphore::Wait()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait");
    NThreadBase* pC = NCurrentThreadL();
    __ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait");
    pC->iWaitState.SetUpWait(NThreadBase::EWaitFastSemaphore, 0, this);
    if (Dec(pC))                        // full barrier
        pC->iWaitState.CancelWait();    // don't have to wait
    else
        RescheduleNeeded();             // have to wait
    }

/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore by
    one and releases any waiting thread if the semaphore becomes signalled.

    Note that a reschedule will not occur before this function returns; it will
    only take place when the kernel is unlocked. Generally threads
    would use NKern::FSSignal() which manipulates the kernel lock for you.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.

    @see NFastSemaphore::Wait()
    @see NKern::FSSignal()
*/
EXPORT_C void NFastSemaphore::Signal()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal");
    NThreadBase* t = Inc(1);    // release semantics
    if (t)
        {
        // ...
        t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
        // ...
        }
    }

/** Signals a fast semaphore multiple times.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C void NFastSemaphore::SignalN(TInt aCount)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN");
    __NK_ASSERT_DEBUG(aCount>=0);
    // ...
    NThreadBase* t = Inc(aCount);
    if (t)
        {
        // ...
        t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
        // ...
        }
    }

/** Cancels a wait on a fast semaphore.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
void NFastSemaphore::WaitCancel()
    {
    // ...
    }

/** Waits for a signal on the current thread's I/O semaphore.

    @pre No fast mutex can be held.
    @pre Call in a thread context.
    @pre Kernel must be unlocked.
    @pre Interrupts must be enabled.
*/
EXPORT_C void NKern::WaitForAnyRequest()
    {
    CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR"));
    NThreadBase* t = NKern::LockC();
    NFastSemaphore* s = &t->iRequestSemaphore;
    t->iWaitState.SetUpWait(NThreadBase::EWaitFastSemaphore, 0, s);
    if (s->Dec(t))                      // fully ordered semantics
        t->iWaitState.CancelWait();     // don't have to wait
    else
        RescheduleNeeded();             // have to wait
    NKern::Unlock();                    // reschedule occurs here if we have to wait
    }
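
// Usage sketch (illustrative): the classic request/complete handshake built
// on the I/O semaphore. 'QueueRequestToDevice()' and 'aWaiter' are
// hypothetical names.
//
//  // requesting thread:
//  QueueRequestToDevice();
//  NKern::WaitForAnyRequest();             // sleeps until signalled
//
//  // completion side (thread or IDFC context):
//  NKern::ThreadRequestSignal(aWaiter);    // wakes the requester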

/** Resets a fast semaphore.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C void NFastSemaphore::Reset()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset");
    NThreadBase* t = DoReset();
    if (t)
        {
        // ...
        t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
        // ...
        }
    }

/** Sets the owner of a fast semaphore.

    @param aSem The semaphore to change ownership of.
    @param aThread The thread to own this semaphore. If aThread==0, then the
                   owner is set to the current thread.

    @pre If changing ownership from one thread to another, there must be no
         pending signals or waits.
*/
EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem, NThreadBase* aThread)
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread));
    NKern::Lock();
    aSem->SetOwner(aThread);
    NKern::Unlock();
    }

#ifndef __FAST_SEM_MACHINE_CODED__
/** Waits on a fast semaphore.

    Decrements the signal count for the semaphore
    and waits for a signal if the semaphore becomes unsignalled. Only the
    thread that owns a fast semaphore can wait on it.

    @param aSem The semaphore to wait on.

    @pre The calling thread must own the semaphore.
    @pre No fast mutex can be held.

    @see NFastSemaphore::Wait()
*/
EXPORT_C void NKern::FSWait(NFastSemaphore* aSem)
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSW %m",aSem));
    // ...
    NKern::Lock();
    aSem->Wait();
    NKern::Unlock();
    }

/** Signals a fast semaphore.

    Increments the signal count of a fast semaphore
    by one and releases any waiting thread if the semaphore becomes signalled.

    @param aSem The semaphore to signal.

    @pre Interrupts must be enabled.
    @pre Do not call from an ISR.
*/
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSS %m",aSem));
    NKern::Lock();
    aSem->Signal();
    NKern::Unlock();
    }
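
// Usage sketch (illustrative): a fast semaphore as a wakeup counter between a
// producer and the single owning consumer thread. 'TheSem', 'TheQueue' and
// 'Process()' are hypothetical; only the owner may call NKern::FSWait().
//
//  static NFastSemaphore TheSem;   // owner set via NKern::FSSetOwner()
//
//  void Producer(TAny* aItem)
//      {
//      TheQueue.Add(aItem);
//      NKern::FSSignal(&TheSem);   // one signal per queued item
//      }
//
//  void ConsumerLoop()
//      {
//      FOREVER
//          {
//          NKern::FSWait(&TheSem); // blocks until an item is queued
//          Process(TheQueue.Remove());
//          }
//      }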

/** Signals a fast semaphore multiple times.

    Increments the signal count of a
    fast semaphore by aCount and releases any waiting thread if the semaphore
    becomes signalled.

    @param aSem The semaphore to signal.
    @param aCount The number of times to signal the semaphore.

    @pre Interrupts must be enabled.
    @pre Do not call from an ISR.
*/
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSSN %m %d",aSem,aCount));
    __NK_ASSERT_DEBUG(aCount>=0);
    NKern::Lock();
    aSem->SignalN(aCount);
    NKern::Unlock();
    }

/** Signals the request semaphore of a nanothread.

    This function is intended to be used by the EPOC layer and personality
    layers. Device drivers should use Kern::RequestComplete instead.

    @param aThread Nanothread to signal. Must be non-NULL.

    @see Kern::RequestComplete()

    @pre Interrupts must be enabled.
    @pre Do not call from an ISR.
*/
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread)
    {
    NKern::FSSignal(&aThread->iRequestSemaphore);
    }


/** Signals the request semaphore of a nanothread several times.

    This function is intended to be used by the EPOC layer and personality
    layers. Device drivers should use Kern::RequestComplete instead.

    @param aThread Nanothread to signal. If NULL, the current thread is signalled.
    @param aCount Number of times the request semaphore must be signalled.

    @see Kern::RequestComplete()
*/
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount)
    {
    __ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal");
    if (!aThread)
        aThread = (NThread*)NKern::CurrentThread();
    NKern::FSSignalN(&aThread->iRequestSemaphore, aCount);
    }

/** Atomically signals a fast semaphore and releases a fast mutex.

    Rescheduling only occurs after both synchronisation operations are complete.

    @param aSem The semaphore to signal.
    @param aMutex The mutex to release. If NULL, the System Lock is released.

    @pre The calling thread must hold the mutex.

    @see NKern::FMSignal()
*/
EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex)
    {
    if (!aMutex)
        aMutex=&TheScheduler.iLock;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSS %m +FM %M",aSem,aMutex));
    NKern::Lock();
    aSem->Signal();
    aMutex->Signal();
    NKern::Unlock();
    }


/** Atomically signals a fast semaphore multiple times and releases a fast mutex.

    Rescheduling only occurs after both synchronisation operations are complete.

    @param aSem The semaphore to signal.
    @param aCount The number of times to signal the semaphore.
    @param aMutex The mutex to release. If NULL, the System Lock is released.

    @pre The calling thread must hold the mutex.

    @see NKern::FMSignal()
*/
EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex)
    {
    if (!aMutex)
        aMutex=&TheScheduler.iLock;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSSN %m %d + FM %M",aSem,aCount,aMutex));
    NKern::Lock();
    aSem->SignalN(aCount);
    aMutex->Signal();
    NKern::Unlock();
    }
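
// Usage sketch (illustrative): completing a request discovered while holding
// the System Lock, with no window in which the waiter can run before the
// lock is dropped. 'FindWaiterSem()' is a hypothetical lookup.
//
//  NKern::LockSystem();
//  NFastSemaphore* s = FindWaiterSem();
//  NKern::FSSignal(s, NULL);   // signals s and releases the System Lock atomically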


/******************************************************************************
 * Thread critical sections
 ******************************************************************************/

void NThreadBase::DoCsFunctionT()
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nDoCsFuncT %d",this,iCsFunction));
    TInt f = iCsFunction;
    iCsFunction = 0;
    if (f > 0)
        {
        // suspend this thread f times
        // ...
        return;
        }
    if (f==ECSExitPending || f==ECSDivertPending)
        {
        // we need to exit now
        Exit();     // this won't return
        }
//  UnknownState(ELeaveCS,f);   // call into RTOS personality
    __NK_ASSERT_ALWAYS(0);
    }

TBool NThreadBase::DoSuspendOrKillT(TInt aCount, TSubScheduler* aS)
    {
    // ... (suspend path)
    iSuspendCount+=aCount;
    // ...
    NThreadGroup* g = (NThreadGroup*)iParent;
    g->iNThreadList.Remove(this);
    // ...
    if (this == NCurrentThreadL())
        {
        // ...
        aS->iReadyListLock.UnlockOnly();
        // ...
        }
    // ... (kill path)
    iCsFunction = ECSDivertPending;
    // ...
    aS->iReadyListLock.UnlockOnly();
    DoReleaseT(KErrDied,0);
    if (!iReady && !iPauseCount)
        {
        // ...
        }
    // ...
    }

// If aCount>=0 suspend the thread aCount times.
// If aCount<0 kill the thread.
TBool NThreadBase::SuspendOrKill(TInt aCount)
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSuspendOrKill %d", this, aCount));
    // ...
    TBool result = FALSE;
    TBool concurrent = FALSE;
    TSubScheduler* ss = 0;
    NFastMutex* wfm = 0;
    // ...
    if (iLinkedObj && iLinkedObjType==EWaitFastMutex)
        wfm = (NFastMutex*)iLinkedObj;
    if (iCsFunction < 0)
        goto done2;     // if already exiting, ignore suspend or kill
    // ...
    // if the thread is waiting on a fast mutex, we need to acquire the mutex lock
    if (wfm)
        {
        // ...
        wfm->iMutexLock.LockOnly();
        // ...
        }
    if (iReady && iParent->iReady)
        {
        ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
        ss->iReadyListLock.LockOnly();
        }
    concurrent = (iCurrent && this!=NCurrentThreadL());
    if (iWaitState.ThreadIsDead())      // already dead so suspension/kill is a no-op
        goto done;
    if (concurrent)
        {
        // thread is actually running on another CPU
        // interrupt that CPU and wait for it to enter interrupt mode
        // this allows a snapshot of the thread state to be observed
        // in this state, the thread cannot enter or leave a critical section
        send_resched_ipi_and_wait(iLastCpu);
        }
    if (iCsCount)
        {
suspend_or_kill_in_cs:
        __KTRACE_OPT(KNKERN,DEBUGPRINT("n Suspend %T (CSF %d) %d",this,iCsFunction,aCount));
        if (aCount>0)               // -ve means thread is about to exit
            iCsFunction+=aCount;    // so thread will suspend itself when it leaves the critical section
        else
            iCsFunction = ECSExitPending;
        goto done;
        }
    // iCsCount==0 and it can't become nonzero until we release the thread spin lock
    // (since threads may not set iCsCount to a nonzero value with the kernel lock held)
    // Make sure the thread isn't actually about to exit by itself
    if (iCsFunction < 0)
        goto done;      // if already exiting, ignore suspend or kill
    if (wfm)
        {
        wfm->iWaitQ.Remove(&iWaitLink);     // take thread off the wait/contend queue
        // ...
        iLinkedObjType = EWaitNone;
        result = DoSuspendOrKillT(aCount, ss);
        if (aCount>0)
            DoReleaseT(KErrGeneral, 0);     // thread isn't blocked any more, just suspended
        // ...
        // May need to adjust the holding thread's inherited priority.
        // May need to wake up the next thread to take this one's place.
        NThreadBase* pH = (NThreadBase*)(TLinAddr(wfm->iHoldingThread) &~ 1);
        if (pH)
            pH->SetMutexPriority(wfm);
        else if (!pH && !wfm->iWaitQ.IsEmpty())
            {
            NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);
            pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
            }
        wfm->iMutexLock.UnlockOnly();
        goto done2;
        }
    if (CheckFastMutexDefer())
        goto suspend_or_kill_in_cs;
    // thread not in critical section, so suspend it
    result = DoSuspendOrKillT(aCount, ss);
done:
    if (wfm)
        wfm->iMutexLock.UnlockOnly();
    if (ss)
        ss->iReadyListLock.UnlockOnly();
    // ...
done2:
    // ...
    return result;
    }


/** Suspends a nanothread the specified number of times.

    For use by RTOS personality layers.
    Do not use this function directly on a Symbian OS thread.
    Since the kernel is locked on entry, any reschedule will be deferred until
    the kernel is unlocked.
    The suspension will be deferred if the target thread is currently in a
    critical section; in this case the suspension will take effect when it exits
    the critical section.
    The thread's unknown state handler will be invoked with function ESuspend and
    parameter aCount if the current NState is not recognised and it is not in a
    critical section.

    @param aCount The number of times to suspend.
    @return TRUE, if the suspension has taken immediate effect;
            FALSE, if the thread is in a critical section or is already suspended.

    @pre Kernel must be locked.
    @pre Call in a thread context.

    @post Kernel is locked.
*/
EXPORT_C TBool NThreadBase::Suspend(TInt aCount)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend");
    __NK_ASSERT_ALWAYS(aCount>=0);

    // If the thread is executing a critical section, we must defer the suspend
    return SuspendOrKill(aCount);
    }

TBool NThreadBase::Resume(TBool aForce)
    {
    TBool result = FALSE;
    // ...
    if (iWaitState.ThreadIsDead() || iCsFunction<0)     // already dead or dying so resume is a no-op
        {
        // ...
        }
    else if (iSuspendCount)
        {
        // ...
        }
    // ...
    if (!iPauseCount && !iReady && !iWaitState.iWtC.iWtStFlags)
        {
        // ...
        }
    // ...
    return result;
    }


/** Resumes a nanothread, cancelling one suspension.

    For use by RTOS personality layers.
    Do not use this function directly on a Symbian OS thread.
    Since the kernel is locked on entry, any reschedule will be deferred until
    the kernel is unlocked.
    If the target thread is currently in a critical section this will simply
    cancel one deferred suspension.
    The thread's unknown state handler will be invoked with function EResume if
    the current NState is not recognised and it is not in a critical section.

    @return TRUE, if the resumption has taken immediate effect;
            FALSE, if the thread is in a critical section or is still suspended.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C TBool NThreadBase::Resume()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nRsm",this));

    return Resume(FALSE);
    }


/** Resumes a nanothread, cancelling all outstanding suspensions.

    For use by RTOS personality layers.
    Do not use this function directly on a Symbian OS thread.
    Since the kernel is locked on entry, any reschedule will be deferred until
    the kernel is unlocked.
    If the target thread is currently in a critical section this will simply
    cancel all deferred suspensions.
    The thread's unknown state handler will be invoked with function EForceResume
    if the current NState is not recognised and it is not in a critical section.

    @return TRUE, if the resumption has taken immediate effect;
            FALSE, if the thread is in a critical section.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C TBool NThreadBase::ForceResume()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nFRsm",this));

    return Resume(TRUE);
    }

void NThreadBase::DoReleaseT(TInt aReturnCode, TUint aMode)
    {
    TAny* wobj = 0;
    TUint32 b = iWaitState.ReleaseT(wobj, aReturnCode);     // cancels timer if necessary

    // if wait pending or no wait, done
    // if wait in effect and nothing else stopping it, make thread ready
    // cancel any outstanding wait on fast semaphore if abnormal release
    // FIXME: Potential problems with abnormal release of generic wait objects
    if (aReturnCode<0 && ((b>>8)&0xff)==NThreadBase::EWaitFastSemaphore && wobj)
        ((NFastSemaphore*)wobj)->WaitCancel();

    if ((b & NThreadWaitState::EWtStWaitActive) && !iPauseCount && !iSuspended)
        ReadyT(aMode);
    }


/** Releases a waiting nanokernel thread.

    For use by RTOS personality layers.
    Do not use this function directly on a Symbian OS thread.
    This function should make the thread ready (provided it is not explicitly
    suspended) and cancel any wait timeout. It should also remove it from any
    wait queues.
    If aReturnCode is nonnegative it indicates normal completion of the wait.
    If aReturnCode is negative it indicates early/abnormal completion of the
    wait and so any wait object should be reverted as if the wait had never
    occurred (e.g. the semaphore count should be incremented, as this thread has
    not actually acquired the semaphore).
    The thread's unknown state handler will be invoked with function ERelease
    and parameter aReturnCode if the current NState is not recognised.

    @param aReturnCode The reason code for release.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C void NThreadBase::Release(TInt aReturnCode, TUint aMode)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nRel %d",this,aReturnCode));
    AcqSLock();
    DoReleaseT(aReturnCode, aMode);
    RelSLock();
    }


/** Signals a nanokernel thread's request semaphore.

    This can also be used on Symbian OS threads.

    @pre Kernel must be locked.
    @pre Call either in a thread or an IDFC context.

    @post Kernel is locked.
*/
EXPORT_C void NThreadBase::RequestSignal()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal");
    iRequestSemaphore.Signal();
    }


void exit_sync_fn(TAny* aDfc)
    {
    ((TDfc*)aDfc)->Enque();
    }

void NThreadBase::Exit()
    {
    // The current thread is exiting
    // Enter with kernel locked, don't return
    __NK_ASSERT_DEBUG(this==NCurrentThreadL());
    // ...
    TInt threadCS = iCsCount;
    TInt kernCS = SubScheduler().iKernLockCount;
    // ...
    iCsFunction = ECSExitInProgress;
    NFastMutex* m = NKern::HeldFastMutex();
    // ...
    __KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount()));
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nExit, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS));
    // ...
    NThreadExitHandler xh = iHandlers->iExitHandler;
    TDfc* pD = 0;
    if (xh)
        pD = (*xh)((NThread*)this);     // call exit handler
    // detach any tied events
    // ...
    NKern::LeaveGroup();    // detach from group if exit handler didn't do it
    // ...
#ifdef BTRACE_THREAD_IDENTIFICATION
    BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this);
#endif
    __NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress);
    iWaitState.SetDead(pD);     // doesn't return
    }


/** Kills a nanokernel thread.

    For use by RTOS personality layers.
    Do not use this function directly on a Symbian OS thread.

    When acting on the calling thread, causes the calling thread to exit.

    When acting on another thread, causes that thread to exit unless it is
    currently in a critical section. In this case the thread is marked as
    "exit pending" and will exit as soon as it leaves the critical section.

    In either case the exiting thread first invokes its exit handler (if it
    exists). The handler runs with preemption enabled and with the thread in a
    critical section so that it may not be suspended or killed again. The
    handler may return a pointer to a TDfc, which will be enqueued just before
    the thread finally terminates (after the kernel has been relocked). This DFC
    will therefore execute once the NThread has been safely removed from the
    scheduler and is intended to be used to clean up the NThread object and any
    associated personality layer resources.

    @pre Kernel must be locked.
    @pre Call in a thread context.
    @pre If acting on the calling thread, the calling thread must not be in a
         critical section; if it is the kernel will fault. Also, the kernel
         must be locked exactly once (iKernCSLocked = 1).

    @post Kernel is locked, if not acting on the calling thread.
    @post Does not return if it acts on the calling thread.
*/
EXPORT_C void NThreadBase::Kill()
    {
    // Enter with kernel locked
    // Exit with kernel locked if not current thread, otherwise does not return
    CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nKill",this));
    OnKill();   // platform-specific hook
    NThreadBase* pC = NCurrentThreadL();
    if (this==pC)
        {
        if (iCsFunction==ECSExitInProgress)
            FAULT();
        Exit();     // this will not return
        }
    SuspendOrKill(-1);
    }


/** Changes the CPU affinity of a thread.

    @pre Kernel must be locked.
    @pre Call in a thread context.

    @param aAffinity The number of the CPU to which this thread should be locked,
                     or KCpuAny if it should be able to run on any CPU.
    @return The previous affinity mask.
*/
TUint32 NThreadBase::SetCpuAffinity(TUint32 aAffinity)
    {
    // check aAffinity is valid
    // ...
    TUint32 old_aff = iParent->iCpuAffinity;
    TBool migrate = FALSE;
    TBool make_ready = FALSE;
    TSubScheduler* ss0 = &SubScheduler();
    TSubScheduler* ss = 0;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
    if (i_NThread_Initial)
        goto done;  // can't change affinity of initial thread
    iParent->iCpuAffinity = aAffinity;  // set new affinity, might not take effect yet
    if (!iParent->iReady)
        goto done;  // thread/group not currently on a ready list so can just change affinity
    migrate = !CheckCpuAgainstAffinity(iParent->iReady & EReadyCpuMask, aAffinity);     // TRUE if thread's current CPU is incompatible with the new affinity
    if (!migrate)
        goto done;  // don't need to move thread, so just change affinity
    ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
    ss->iReadyListLock.LockOnly();
    if (iParent->iCurrent)
        {
        iParent->iCpuChange = TRUE;     // mark CPU change pending
        // ...
        // kick the other CPU now so migration happens before acquisition of a fast mutex
        send_resched_ipi_and_wait(iParent->iReady & EReadyCpuMask);
        // ...
        }
    else
        {
        // Note: we need to know here if any thread in the group would return TRUE from
        // CheckFastMutexDefer(). This is handled by the scheduler - when a thread belonging
        // to a group is context switched out while holding a fast mutex, its iFastMutexDefer
        // is set to 1 and the group's iFreezeCpu is incremented.
        if (iParent->iFreezeCpu || (iParent==this && CheckFastMutexDefer()))
            iParent->iCpuChange = TRUE;     // CPU frozen or fast mutex held so just mark deferred CPU migration
        else
            {
            ss->Remove(iParent);
            iParent->iReady = 0;
            make_ready = TRUE;
            }
        }
    ss->iReadyListLock.UnlockOnly();
    // ...
done:
    // ...
    return old_aff;
    }


/******************************************************************************
 * Thread wait state
 ******************************************************************************/
#ifndef __NTHREAD_WAITSTATE_MACHINE_CODED__
void NThreadWaitState::SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj)
    {
    SetUpWait(aType, aFlags, aWaitObj, 0);
    }

void NThreadWaitState::SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout)
    {
    aFlags &= EWtStObstructed;
    aFlags |= EWtStWaitPending;
    // ...
    TUint64 ws64 = (TUint32)aWaitObj;
    ws64 <<= 32;
    ws64 |= ((aType<<8)|aFlags);
    TUint64 oldws64 = __e32_atomic_swp_rlx64(&iWtSt64, ws64);
    if (I64LOW(oldws64)!=0)
        __crash();  // ??we were already waiting for something else??
    iTimer.iTriggerTime = aTimeout;
    }
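
// Layout of the packed 64-bit wait word, as implied by the code above and by
// ReleaseT() below (illustrative decode, not original source): the low word
// holds (object type << 8) | state flags, and the high word holds the wait
// object pointer (or the return value once the wait completes).
//
//  TUint32 flags = I64LOW(iWtSt64) & 0xff;         // EWtSt... flags
//  TUint32 type  = (I64LOW(iWtSt64) >> 8) & 0xff;  // NThreadBase::EWait... type
//  TAny*   obj   = (TAny*)I64HIGH(iWtSt64);        // wait object / return value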

void NThreadWaitState::CancelWait()
    {
    TUint64 oldws64 = __e32_atomic_swp_rlx64(&iWtSt64, 0);
    if (oldws64 & (EWtStDead|EWtStWaitActive))
        __crash();
    }

TInt NThreadWaitState::DoWait()
    {
    TUint64 oldws64 = iWtSt64;
    TUint64 ws64;
    TUint32 timeout = iTimer.iTriggerTime;
    TUint32 set = timeout ? (EWtStWaitActive|EWtStTimeout) : EWtStWaitActive;
    do  {
        TUint32 ws32 = I64LOW(oldws64);
        if (ws32 & EWtStDead)
            return KErrDied;
        if (!(ws32 & EWtStWaitPending))
            return KErrGeneral;
        ws64 = oldws64;
        ws64 &= ~TUint64(EWtStWaitPending);
        ws64 |= TUint64(set);
        } while(!__e32_atomic_cas_rlx64(&iWtSt64, &oldws64, ws64));
    if (timeout)
        {
        if (iTimer.OneShot(timeout, TRUE)!=KErrNone)
            __crash();
        ++iTimer.iNTimerSpare1;
        }
    return TUint32(oldws64)>>8;
    }

TInt NThreadWaitState::UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue)
    {
    TUint64 exp = TUint32(aWaitObj);
    exp <<= 32;
    exp |= (aType<<8);
    TUint64 oldws64 = iWtSt64;
    TUint64 ws64;
    do  {
        if ((oldws64 ^ exp) < TUint64(EWtStDead))
            ws64 = TUint64(TUint32(aReturnValue))<<32;
        else
            ws64 = oldws64;
        } while(!__e32_atomic_cas_rel64(&iWtSt64, &oldws64, ws64));
    if ((oldws64 ^ exp) >= TUint64(EWtStDead))
        return KErrGeneral;     // not unblocked - no matching wait
    if (oldws64 & EWtStTimeout)
        CancelTimerT();
    if (oldws64 & EWtStWaitActive)
        {
        NThreadBase* t = Thread();
        if (!t->iPauseCount && !t->iSuspended)
            t->ReadyT(0);
        }
    return KErrNone;
    }

TUint32 NThreadWaitState::ReleaseT(TAny*& aWaitObj, TInt aReturnValue)
    {
    TUint64 leave = EWtStDead;
    TUint64 set = TUint64(TUint32(aReturnValue))<<32;
    TUint64 ws64 = __e32_atomic_axo_ord64(&iWtSt64, leave, set);
    aWaitObj = (TAny*)I64HIGH(ws64);
    TUint32 ws32 = I64LOW(ws64);
    if (ws32 & EWtStTimeout)
        CancelTimerT();
    return ws32;
    }

void NThreadWaitState::SetDead(TDfc* aKillDfc)
    {
    TDfc syncDfc(&exit_sync_fn, aKillDfc, TheTimerQ.iDfc.iDfcQ, 0);
    NThreadBase* t = Thread();
    // ...
    iWtC.iWtStFlags = NThreadWaitState::EWtStDead;
    iWtC.iWtObjType = NThreadBase::EWaitNone;
    // ...
    if (aKillDfc && iTimer.iNTimerSpare1)
        {
        // There is an outstanding timer expiry handler still running
        // so we must synchronise with DfcThread1.
        // Add a priority 0 DFC to DfcThread1 so this thread's exit DFC can
        // only run after the timer expiry handler has completed.
        aKillDfc = &syncDfc;
        }
    iWtC.iKillDfc = aKillDfc;
    // ...
    NKern::Unlock();    // this won't return
    }

void NThreadWaitState::CancelTimerT()
    {
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nCancelTimerT ",Thread()));
    if (iTimer.Cancel())
        --iTimer.iNTimerSpare1;
    else
        {
        // Potential race condition - the timer must have completed but the expiry
        // handler has not yet run. Signal to the handler that it should do
        // nothing by flipping the bottom bit of iTimer.iPtr.
        // This condition cannot possibly recur until the expiry handler has
        // run, since all expiry handlers run in DfcThread1.
        volatile TLinAddr& x = *(volatile TLinAddr*)&iTimer.iPtr;
        x ^= 1;
        }
    }

// Timeout handler, called in DfcThread1
// NOTE: aPtr is sampled with the timer queue locked, so if Cancel() on the timer fails
// and iTimer.iPtr is then changed, aPtr here will differ from iTimer.iPtr.
// This fact is used here to detect expiry of cancelled timers.
void NThreadWaitState::TimerExpired(TAny* aPtr)
    {
    TLinAddr cookie = (TLinAddr)aPtr;
    NThreadWaitState* pW = (NThreadWaitState*)(cookie &~ 3);
    NThread* pT = (NThread*)pW->Thread();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nTmExp",pT));
    NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler;
    // ...
    TUint flags = pW->iWtSt32[0];
    if (!(flags & EWtStWaitActive) || ((flags>>8)&0xff)!=NThreadBase::EWaitBlocked)
        th = 0;     // not a generic block, so don't use the higher level handler
    if (th)
        {
        // Use higher level timeout handler
        (*th)(pT, NThreadBase::ETimeoutPreamble);
        TInt param = NThreadBase::ETimeoutPostamble;
        // ...
        TLinAddr current_cookie = *(volatile TLinAddr*)&pW->iTimer.iPtr;
        if ((cookie ^ current_cookie) & 1)
            {
            // The timer was cancelled just after expiring but before this function
            // managed to acquire the thread spin lock, so it's spurious
            param = NThreadBase::ETimeoutSpurious;
            }
        // ...
        (*th)(pT, param);
        --pW->iTimer.iNTimerSpare1;     // note timer has expired
        return;
        }
    // ...
    TLinAddr current_cookie = *(volatile TLinAddr*)&pW->iTimer.iPtr;
    if ((cookie ^ current_cookie) & 1)
        {
        // The timer was cancelled just after expiring but before this function
        // managed to acquire the thread spin lock, so just return without doing anything.
        // ...
        return;
        }
    pT->DoReleaseT(KErrTimedOut,0);
    // ...
    }


/******************************************************************************
 * NKern:: static functions
 ******************************************************************************/

/** Suspends the execution of a thread.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend().

    If the thread is in a critical section or holds a fast mutex, the suspension will
    be deferred until the thread leaves the critical section or signals the fast mutex.
    Otherwise the thread will be suspended with immediate effect. If the thread is
    running, the execution of the thread will be suspended and a reschedule will occur.

    @param aThread Thread to be suspended.
    @param aCount Number of times to suspend this thread.

    @return TRUE, if the thread had changed the state from non-suspended to suspended;
            FALSE, otherwise.

    @see Kern::ThreadSuspend()
*/
EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount)
    {
    NKern::Lock();
    TBool r=aThread->Suspend(aCount);
    NKern::Unlock();
    return r;
    }


/** Resumes the execution of a thread.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().

    This function resumes the thread once. If the thread was suspended more than once
    the thread will remain suspended.
    If the thread is in a critical section, this function will decrease the number of
    deferred suspensions.

    @param aThread Thread to be resumed.

    @return TRUE, if the thread had changed the state from suspended to non-suspended;
            FALSE, otherwise.

    @see Kern::ThreadResume()
*/
EXPORT_C TBool NKern::ThreadResume(NThread* aThread)
    {
    NKern::Lock();
    TBool r=aThread->Resume();
    NKern::Unlock();
    return r;
    }


/** Resumes the execution of a thread and signals a mutex.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().

    This function resumes the thread once. If the thread was suspended more than once
    the thread will remain suspended.
    If the thread is in a critical section, this function will decrease the number of
    deferred suspensions.

    @param aThread Thread to be resumed.
    @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.

    @return TRUE, if the thread had changed the state from suspended to non-suspended;
            FALSE, otherwise.

    @see Kern::ThreadResume()
*/
EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex)
    {
    if (!aMutex)
        aMutex=&TheScheduler.iLock;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NRsm + FM %M",aThread,aMutex));
    NKern::Lock();
    TBool r=aThread->Resume();
    aMutex->Signal();
    NKern::Unlock();
    return r;
    }


/** Forces the execution of a thread to be resumed.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().

    This function cancels all suspensions on a thread.

    @param aThread Thread to be resumed.

    @return TRUE, if the thread had changed the state from suspended to non-suspended;
            FALSE, otherwise.

    @see Kern::ThreadResume()
*/
EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread)
    {
    NKern::Lock();
    TBool r=aThread->ForceResume();
    NKern::Unlock();
    return r;
    }


/** Forces the execution of a thread to be resumed and signals a mutex.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().

    This function cancels all suspensions on a thread.

    @param aThread Thread to be resumed.
    @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.

    @return TRUE, if the thread had changed the state from suspended to non-suspended;
            FALSE, otherwise.

    @see Kern::ThreadResume()
*/
EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex)
    {
    if (!aMutex)
        aMutex=&TheScheduler.iLock;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NFRsm + FM %M",aThread,aMutex));
    NKern::Lock();
    TBool r=aThread->ForceResume();
    aMutex->Signal();
    NKern::Unlock();
    return r;
    }


/** Awakens a nanothread.

    This function is used to implement synchronisation primitives in the EPOC
    kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not
    intended to be used directly by device drivers.

    If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is
    blocked in a call to NKern::Block, it is awakened and put back on the ready
    list. Otherwise, the thread state is unchanged. In particular, nothing
    happens if the nanothread has been explicitly suspended.

    @param aThread Thread to release.
    @param aReturnValue Value returned by NKern::Block if the thread was blocked.

    @pre Interrupts must be enabled.
    @pre Do not call from an ISR.
*/
EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue)
    {
    CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)");
    NKern::Lock();
    aThread->Release(aReturnValue,0);
    NKern::Unlock();
    }


/** Atomically awakens a nanothread and signals a fast mutex.

    This function is used to implement synchronisation primitives in the EPOC
    kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not
    intended to be used directly by device drivers.

    @param aThread Thread to release.
    @param aReturnValue Value returned by NKern::Block if the thread was blocked.
    @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.

    @see NKern::ThreadRelease(NThread*, TInt)

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
    @pre Kernel must be unlocked.
    @pre Specified mutex must be held.
*/
EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)");
    if (!aMutex)
        aMutex=&TheScheduler.iLock;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NRel ret %d + FM %M",aThread,aReturnValue,aMutex));
    NKern::Lock();
    aThread->Release(aReturnValue,0);
    aMutex->Signal();
    NKern::Unlock();
    }


/** Changes the priority of a thread.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().

    @param aThread Thread to receive the new priority.
    @param aPriority New priority for aThread.

    @see Kern::SetThreadPriority()
*/
EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority)
    {
    NKern::Lock();
    aThread->SetPriority(aPriority);
    NKern::Unlock();
    }


/** Changes the priority of a thread and signals a mutex.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().

    @param aThread Thread to receive the new priority.
    @param aPriority New priority for aThread.
    @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.

    @see Kern::SetThreadPriority()
*/
EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex)
    {
    if (!aMutex)
        aMutex=&TheScheduler.iLock;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NSPri->%d + FM %M",aThread,aPriority,aMutex));
    NKern::Lock();
    aThread->SetPriority(aPriority);
    aMutex->Signal();
    NKern::Unlock();
    }


/** Atomically signals the request semaphore of a nanothread and a fast mutex.

    This function is intended to be used by the EPOC layer and personality
    layers. Device drivers should use Kern::RequestComplete instead.

    @param aThread Nanothread to signal. Must be non-NULL.
    @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.

    @see Kern::RequestComplete()

    @pre Call in a thread context.
    @pre Interrupts must be enabled.
    @pre Kernel must be unlocked.
    @pre Specified mutex must be held.
*/
EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)");
    if (!aMutex)
        aMutex = &TheScheduler.iLock;
    NKern::Lock();
    aThread->iRequestSemaphore.Signal();
    aMutex->Signal();
    NKern::Unlock();
    }


/** Kills a nanothread.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().

    This function does not return if the current thread is killed.
    This function is asynchronous (i.e. the thread to kill may still be alive when the call returns).

    @param aThread Thread to kill. Must be non-NULL.

    @pre If acting on the calling thread, the calling thread must not be in a
         critical section.
    @pre Thread must not already be exiting.

    @see Kern::ThreadKill()
*/
EXPORT_C void NKern::ThreadKill(NThread* aThread)
    {
    NKern::Lock();
    aThread->Kill();
    NKern::Unlock();
    }


/** Atomically kills a nanothread and signals a fast mutex.

    This function is intended to be used by the EPOC layer and personality layers.
    Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().

    @param aThread Thread to kill. Must be non-NULL.
    @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.

    @pre If acting on the calling thread, the calling thread must not be in a
         critical section.
    @pre Thread must not already be exiting.

    @see NKern::ThreadKill(NThread*)
*/
EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex)
    {
    if (!aMutex)
        aMutex = &TheScheduler.iLock;
    NThreadBase* pC = NKern::LockC();
    if (aThread==pC)
        {
        __NK_ASSERT_DEBUG(pC->iCsCount==0);     // make sure thread isn't in critical section
        __NK_ASSERT_ALWAYS(aMutex->HeldByCurrentThread());
        aThread->iCsFunction = NThreadBase::ECSExitPending;
        aMutex->iHoldingThread = (NThreadBase*)(TLinAddr(aThread) | 1);
        aMutex->Signal();   // this will make us exit
        FAULT();            // should never get here
        }
    aThread->Kill();
    aMutex->Signal();
    NKern::Unlock();
    }


/** Enters thread critical section.

    This function can safely be used in device drivers.

    The current thread will enter its critical section. While in critical section
    the thread cannot be suspended or killed. Any suspension or kill will be deferred
    until the thread leaves the critical section.
    Some APIs explicitly require threads to be in a critical section before they
    are called.
    Only User threads need to call this function as the concept of thread critical
    section applies to User threads only.

    @pre Call in a thread context.
    @pre Kernel must be unlocked.
*/
EXPORT_C void NKern::ThreadEnterCS()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS");
    NThreadBase* pC = NKern::CurrentThread();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NEntCS",pC));
    __NK_ASSERT_DEBUG(pC->iCsCount>=0);
    ++pC->iCsCount;
    }

NThread* NKern::_ThreadEnterCS()
    {
    NThreadBase* pC = NKern::CurrentThread();
    __NK_ASSERT_DEBUG(pC->iCsCount>=0);
    ++pC->iCsCount;
    return (NThread*)pC;
    }


/** Leaves thread critical section.

    This function can safely be used in device drivers.

    The current thread will leave its critical section. If the thread was suspended
    or killed while in critical section, the suspension or kill will take effect
    once this function is called.
    Only User threads need to call this function as the concept of thread critical
    section applies to User threads only.

    @pre Call in a thread context.
    @pre Kernel must be unlocked.
*/
EXPORT_C void NKern::ThreadLeaveCS()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS");
    NThreadBase* pC = NKern::LockC();
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NLvCS",pC));
    __NK_ASSERT_DEBUG(pC->iCsCount>0);
    if (--pC->iCsCount==0 && pC->iCsFunction!=0)
        {
        NFastMutex* m = HeldFastMutex();
        if (m)
            m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);  // mark the mutex contended so the deferred action runs on Signal()
        else
            pC->DoCsFunctionT();
        }
    NKern::Unlock();
    }

void NKern::_ThreadLeaveCS()
    {
    NThreadBase* pC = NKern::LockC();
    __NK_ASSERT_DEBUG(pC->iCsCount>0);
    if (--pC->iCsCount==0 && pC->iCsFunction!=0)
        {
        NFastMutex* m = HeldFastMutex();
        if (m)
            m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
        else
            pC->DoCsFunctionT();
        }
    NKern::Unlock();
    }
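
// Usage sketch (illustrative): bracket a blocking allocation in a driver so
// the thread cannot be suspended or killed while it holds the resource.
// 'CreateBuffer()' is a hypothetical allocation that must not leak.
//
//  NKern::ThreadEnterCS();         // suspend/kill deferred from here
//  TAny* buf = CreateBuffer();     // safe: we cannot die holding this
//  // ... publish buf somewhere it will be cleaned up ...
//  NKern::ThreadLeaveCS();         // deferred suspend/kill takes effect here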


/** Freezes the CPU of the current thread.

    After this the current thread will not migrate to another processor.

    @return A cookie to be passed to NKern::EndFreezeCpu() to allow nesting.
*/
EXPORT_C TInt NKern::FreezeCpu()
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu");
    NKern::Lock();
    TSubScheduler& ss = SubScheduler();
    NThreadBase* pC = ss.iCurrentThread;
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NFrzCpu",pC));
    // ...
    if (pC->iParent != pC)
        {
        // ...
        ++pC->iParent->iFreezeCpu;
        // ...
        }
    // ...
    }


/** Unfreezes the current thread's CPU.

    After this the current thread will again be eligible to migrate to another processor.

    @param aCookie the value returned by NKern::FreezeCpu()
*/
EXPORT_C void NKern::EndFreezeCpu(TInt aCookie)
    {
    CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu");
    __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NEndFrz %d",NKern::CurrentThread(),aCookie));
    // ...
    TSubScheduler& ss = SubScheduler();
    NThreadBase* pC = ss.iCurrentThread;
    // ...
    if (pC->iParent != pC)
        {
        // ...
        if (!--pC->iParent->iFreezeCpu && pC->iParent->iCpuChange)
            {
            // ...
            }
        }
    else if (pC->iCpuChange)    // deferred CPU change?
        {
        // ...
        }
    // ...
    }
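
// Usage sketch (illustrative): pin the current thread to its CPU around code
// that reads per-CPU state. Calls may nest via the returned cookie.
// 'DoPerCpuWork()' is hypothetical.
//
//  TInt cookie = NKern::FreezeCpu();   // no migration from here...
//  DoPerCpuWork();
//  NKern::EndFreezeCpu(cookie);        // ...until here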


/** Changes the CPU affinity of a thread.

    @pre Call in a thread context.

    @param aThread The thread whose affinity is to be changed.
    @param aAffinity The new CPU affinity mask.
    @return The old affinity mask.
*/
EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity)
    {
    NKern::Lock();
    TUint32 r = aThread->SetCpuAffinity(aAffinity);
    NKern::Unlock();
    return r;
    }


/** Modifies a thread's timeslice.

    @pre Call in a thread context.

    @param aThread The thread whose timeslice is to be changed.
    @param aTimeslice The new timeslice value.
*/
EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice)
    {
    NKern::Lock();
    aThread->AcqSLock();
    if (aThread->iTimeslice == aThread->iTime || aTimeslice<0)
        aThread->iTime = aTimeslice;
    aThread->iTimeslice = aTimeslice;
    aThread->RelSLock();
    NKern::Unlock();
    }
/** Blocks current nanothread.

This function is used to implement synchronisation primitives in the EPOC
layer and in personality layers. It is not intended to be used directly by
device drivers.

@param aTimeout If greater than 0, the nanothread will be blocked for at most
       aTimeout microseconds.
@param aMode Bitmask whose possible values are documented in TBlockMode.
@param aMutex Fast mutex to operate on. If NULL, the system lock is used.

@see NKern::ThreadRelease()
@see TBlockMode

@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre Specified mutex must be held.
*/
EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)");
	if (!aMutex)
		aMutex = &TheScheduler.iLock;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex));
	if (aMode & EEnterCS)
		NKern::_ThreadEnterCS();	// NOTE: MUST DO THIS BEFORE CALLING NKern::Lock()
	NThreadBase* pC = NKern::LockC();
	TUint flags = (aMode & NKern::EObstruct) ? NThreadWaitState::EWtStObstructed : 0;
	pC->iWaitState.SetUpWait(NThreadBase::EWaitBlocked, flags, 0, aTimeout);
	if (aMode & ERelease)
		aMutex->Signal();
	RescheduleNeeded();
	NKern::Unlock();	// thread blocks here
	TInt r = pC->iWaitState.iWtC.iRetVal;	// sample here since it will be overwritten if we block on the fast mutex
	if (aMode & EClaim)
		FMWait(aMutex);		// reacquire the mutex on wake-up if requested
	return r;
	}

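// Usage sketch: the waiting half of a minimal wait object built on
// NKern::Block(), with the wake-up done by NKern::ThreadRelease(). MyWaitQ
// and iWaiter are hypothetical names.
//
//	// waiter: 'aMutex' guards the wait object and is held on entry; it is
//	// released atomically with the block and reclaimed on wake-up
//	TInt WaitOn(MyWaitQ& aQ, NFastMutex* aMutex)
//		{
//		aQ.iWaiter = NKern::CurrentThread();
//		return NKern::Block(0, NKern::ERelease|NKern::EClaim, aMutex);
//		}
//
//	// signaller: wakes the waiter with a return value
//	NKern::ThreadRelease(aQ.iWaiter, KErrNone);
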
/**
@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.

@see NKern::Block(TUint32, TUint, NFastMutex*)
*/
EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode));
	if (aMode & EEnterCS)
		NKern::_ThreadEnterCS();	// NOTE: MUST DO THIS BEFORE CALLING NKern::Lock()
	NThreadBase* pC = NKern::LockC();
	TUint flags = (aMode & NKern::EObstruct) ? NThreadWaitState::EWtStObstructed : 0;
	pC->iWaitState.SetUpWait(NThreadBase::EWaitBlocked, flags, 0, aTimeout);
	RescheduleNeeded();
	NKern::Unlock();	// thread blocks here
	return pC->iWaitState.iWtC.iRetVal;
	}

/** Places the current nanothread into a wait state on an externally
defined wait object.

For use by RTOS personality layers.
Do not use this function directly on a Symbian OS thread.

Since the kernel is locked on entry, any reschedule will be deferred until
it is unlocked. The thread should be added to any necessary wait queue after
a call to this function, since this function removes it from the ready list.
The thread's wait timer is started if aTimeout is nonzero.
The thread's NState and wait object are updated.

Call NThreadBase::Release() when the wait condition is resolved.

@param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks.
       A zero value means wait forever.
       If the thread is still blocked when the timeout expires,
       then the timeout state handler will be called.
@param aState The nanokernel thread state (N-State) value to be set.
       This state corresponds to the externally defined wait object.
       This value will be written into the member NThreadBase::iNState.
@param aWaitObj A pointer to an externally defined wait object.
       This value will be written into the member NThreadBase::iWaitObj.

@pre Kernel must be locked.
@pre Call in a thread context.

@post Kernel is locked.

@see NThreadBase::Release()
*/
EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj));
	NThreadBase* pC = NCurrentThreadL();
	pC->iWaitState.SetUpWait(aState, aState>>8, aWaitObj, aTimeout);
	RescheduleNeeded();
	}

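// Usage sketch for a personality layer semaphore wait; EMyWaitSem and
// AddWaiter() are hypothetical. Note the ordering required by the comment
// above: queue the thread on the wait object after NanoBlock(), and wake it
// later with NThreadBase::Release().
//
//	NKern::Lock();
//	NKern::NanoBlock(timeoutTicks, EMyWaitSem, sem);
//	sem->AddWaiter(NKern::CurrentThread());
//	NKern::Unlock();	// deferred reschedule happens here
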
EXPORT_C void NKern::Sleep(TUint32 aTime)
/**
Puts the current nanothread to sleep for the specified duration.

It can be called from Symbian OS threads.

@param aTime sleep time in nanokernel timer ticks.

@pre No fast mutex can be held.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NSlp %d",aTime));
	NThreadBase* pC = NKern::LockC();
	pC->iWaitState.SetUpWait(NThreadBase::EWaitSleep, 0, 0, aTime);
	RescheduleNeeded();
	NKern::Unlock();	// thread sleeps here
	}

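// Usage sketch: sleep for roughly 10ms. NKern::TimerTicks() converts
// milliseconds to nanokernel timer ticks.
//
//	NKern::Sleep(NKern::TimerTicks(10));
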
/** Terminates the current nanothread.

Calls to this function never return.

For use by RTOS personality layers.
Do not use this function directly on a Symbian OS thread.

@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
*/
EXPORT_C void NKern::Exit()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NExit"));
	NKern::LockC()->Exit();		// this won't return
	__NK_ASSERT_ALWAYS(0);
	}

/** Terminates the current nanothread at the next possible point.

If the calling thread is not currently in a critical section and does not
currently hold a fast mutex, it exits immediately and this function does
not return. On the other hand, if the thread is in a critical section or
holds a fast mutex, the thread continues executing, but it will exit as soon
as it leaves the critical section and/or releases the fast mutex.

@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
*/
EXPORT_C void NKern::DeferredExit()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit"));
	NFastMutex* m = HeldFastMutex();
	NThreadBase* pC = NKern::LockC();
	if (!m && !pC->iCsCount)
		pC->Exit();			// this won't return
	pC->AcqSLock();
	if (pC->iCsFunction >= 0)	// don't touch it if we are already exiting
		pC->iCsFunction = NThreadBase::ECSExitPending;
	pC->RelSLock();
	if (m && !pC->iCsCount)
		m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);	// contention flag makes Signal() run the exit function
	NKern::Unlock();
	}

/** Prematurely terminates the current thread's timeslice

@pre Kernel must be unlocked.
@pre Call in a thread context.

@post Kernel is unlocked.
*/
EXPORT_C void NKern::YieldTimeslice()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice"));
	NThreadBase* t = NKern::LockC();
	t->iTime = 0;
	mb();
	if (t->iNext!=t || t->iParent->iNext!=t->iParent)
		RescheduleNeeded();		// only reschedule if another ready thread can run
	NKern::Unlock();
	}

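// Usage sketch: a polite polling loop that hands the rest of each timeslice
// to other ready threads at the same priority. Done() is hypothetical.
//
//	while (!Done())
//		NKern::YieldTimeslice();
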
/** Returns the number of CPUs available to Symbian OS

@return the number of CPUs

@pre Call in any context.
*/
EXPORT_C TInt NKern::NumberOfCpus()
	{
	return TheScheduler.iNumCpus;
	}

/** Rotates the specified CPU ready list for threads at the specified priority.

For use by RTOS personality layers to allow external control of round-robin
scheduling. Not intended for direct use by device drivers.

@param aPriority Priority at which threads should be rotated;
       -1 means use the calling thread's priority.
@param aCpu CPU to act on

@pre Kernel must be unlocked.
@pre Call in a thread context.

@post Kernel is unlocked.
*/
EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt aCpu)
	{
	// currently a no-op on SMP; the uniprocessor implementation is retained below
//	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList");
//	__KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority));
//	if (aPriority<0 || aPriority>=KNumPriorities)
//		aPriority=NKern::CurrentThread()->iPriority;
//	NKern::Lock();
//	TheScheduler.RotateReadyList(aPriority);
//	NKern::Unlock();
	}

/** Rotates the ready list for threads at the specified priority.

For use by RTOS personality layers to allow external control of round-robin
scheduling. Not intended for direct use by device drivers.

@param aPriority Priority at which threads should be rotated;
       -1 means use the calling thread's priority.

@pre Kernel must be unlocked.
@pre Call in a thread context.

@post Kernel is unlocked.
*/
EXPORT_C void NKern::RotateReadyList(TInt aPriority)
	{
	RotateReadyList(aPriority, -1);
	}

/** Returns a pointer to the thread group to which the current thread belongs,
if any.

@return The group pointer, or NULL if the current thread is a standalone thread.

@pre Call in a thread context.
*/
EXPORT_C NThreadGroup* NKern::CurrentGroup()
	{
	NThreadBase* pC = NKern::CurrentThread();
	return (pC->iParent == pC) ? (NThreadGroup*)0 : (NThreadGroup*)pC->iParent;
	}

/** Detaches the current thread from the group to which it currently belongs,
if any. Returns a pointer to the group (NULL if none).

@pre Call in a thread context.
@pre Interrupts enabled.
@pre Kernel unlocked.
*/
EXPORT_C NThreadGroup* NKern::LeaveGroup()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NOT_IDFC|MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED, "NKern::LeaveGroup");
	NKern::Lock();
	TSubScheduler& ss = SubScheduler();
	NThreadBase* pC = ss.iCurrentThread;
	pC->iNewParent = 0;	// cancel any pending Join
	NThreadGroup* g = (pC->iParent == pC) ? (NThreadGroup*)0 : (NThreadGroup*)pC->iParent;
	TBool make_group_ready = FALSE;
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NLeaveGroup %T (%G)",pC,g));
	if (g)
		{
		// drain any pending tied events before detaching from the group
		while (!pC->TiedEventLeaveInterlock())
			{
			TInt irq = NKern::DisableAllInterrupts();
			ss.QueueDfcs();
			NKern::RestoreInterrupts(irq);
			}
		pC->AcqSLock();
		ss.iReadyListLock.LockOnly();
		pC->UnReadyT();
		pC->iParent = pC;
		g->iCurrent = 0;	// since current thread is no longer in g
		ss.AddHead(pC);
		pC->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
		pC->iCpuAffinity = g->iCpuAffinity;	// keep same CPU affinity
		// if we're frozen, the group's freeze count was incremented
		if (pC->iFreezeCpu)
			--g->iFreezeCpu;
		// if we've been marked as deferring, the group's freeze count was incremented
		if (pC->iFastMutexDefer == 1)
			{
			--g->iFreezeCpu;
			pC->iFastMutexDefer = 0;
			}
		// if the group was waiting to change cpu then this thread needs to change still
		if (g->iCpuChange)
			{
			pC->iCpuChange = g->iCpuChange;
			if (!g->iFreezeCpu)
				{
				// we were the last thread in the group stopping it from moving
				// but there may be no other threads left after UnReadyT'ing this one
				g->iCpuChange = FALSE;
				if (g->iReady)
					{
					ss.Remove(g);
					g->iReady = 0;
					make_group_ready = TRUE;
					}
				}
			}
		ss.iReadyListLock.UnlockOnly();
		if (make_group_ready)
			g->ReadyT(0);
		g->RelSLock();	// since pC is no longer attached to g
		pC->RelSLock();
		}
	NKern::Unlock();
	return g;
	}

/** Adds the current thread to the specified group.

@param aGroup Pointer to the group to join.

@pre Call in a thread context, not in one of the idle threads.
@pre Interrupts enabled.
@pre Kernel unlocked.
@pre Thread does not hold a fast mutex.
@pre Thread does not have a freeze on CPU migration.
@pre Current thread is not already in a group.
*/
EXPORT_C void NKern::JoinGroup(NThreadGroup* aGroup)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD, "NKern::JoinGroup");
	NKern::Lock();
	TSubScheduler& ss = SubScheduler();
	NThreadBase* pC = ss.iCurrentThread;
	__ASSERT_WITH_MESSAGE_DEBUG(pC->iParent==pC, "Thread not already in a group", "NKern::JoinGroup");
	__ASSERT_WITH_MESSAGE_DEBUG(!pC->iFreezeCpu, "No interdiction on CPU migration", "NKern::JoinGroup");
	__ASSERT_WITH_MESSAGE_DEBUG(!pC->i_NThread_Initial, "Not idle thread", "NKern::JoinGroup");
	__NK_ASSERT_ALWAYS(pC->iParent==pC && !pC->iFreezeCpu);
	__KTRACE_OPT(KNKERN,DEBUGPRINT("NJoinGroup %T->%G",pC,aGroup));
	pC->AcqSLock();
	aGroup->AcqSLock();
	TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity);	// TRUE if thread's current CPU is incompatible with the group's affinity
	if (!aGroup->iReady || aGroup->iReady==pC->iReady)
		{
		// group not ready or ready on this CPU
		if (!migrate)
			{
			// can join immediately on this CPU
			ss.iReadyListLock.LockOnly();
			pC->UnReadyT();
			pC->iParent = aGroup;
			aGroup->iNThreadList.AddHead(pC);
			if (!aGroup->iReady)
				{
				aGroup->iPriority = pC->iPriority;
				ss.AddHead(aGroup);
				aGroup->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
				}
			else if (pC->iPriority > aGroup->iPriority)
				{
				ss.ChangePriority(aGroup, pC->iPriority);
				}
			pC->iReady = NSchedulable::EReadyGroup;
			aGroup->iCurrent = aGroup->iReady;
			ss.iReadyListLock.UnlockOnly();
			++aGroup->iThreadCount;
			goto done;
			}
		}
	// this thread needs to migrate to another CPU
	pC->iNewParent = aGroup;

done:
	// the following reschedule definitely joins the group even if the
	// thread's CPU affinity is incompatible with that of the group
	// (the thread's CPU affinity is subsequently determined by that of
	// the group)
	aGroup->RelSLock();
	pC->RelSLock();
	if (pC->iParent != aGroup)
		RescheduleNeeded();	// deferred join completes in the reschedule

	while (!pC->TiedEventJoinInterlock())
		{
		TInt irq = NKern::DisableAllInterrupts();
		ss.QueueDfcs();
		NKern::RestoreInterrupts(irq);
		}
	NKern::Unlock();
	}

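// Usage sketch: a personality layer serialising the current thread with the
// other members of a group (the scheduler runs at most one thread of a group
// at any time). 'group' stands for an NThreadGroup already set up elsewhere.
//
//	NKern::JoinGroup(group);
//	// ... code that must not run concurrently with other group members ...
//	NThreadGroup* g = NKern::LeaveGroup();	// g == group
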
/******************************************************************************
 * Priority lists
 ******************************************************************************/

#ifndef __PRI_LIST_MACHINE_CODED__
/** Returns the priority of the highest priority item present on a priority list.

@return The highest priority present or -1 if the list is empty.
*/
EXPORT_C TInt TPriListBase::HighestPriority()
	{
//	TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
//	return __e32_find_ms1_64(present);
	return __e32_find_ms1_64(iPresent64);	// most significant set bit of the 64-bit presence mask
	}

/** Finds the highest priority item present on a priority list.

If multiple items at the same priority are present, return the first to be
added in chronological order.

@return A pointer to the item or NULL if the list is empty.
*/
EXPORT_C TPriListLink* TPriListBase::First()
	{
	TInt p = HighestPriority();
	return p >= 0 ? static_cast<TPriListLink*>(iQueue[p]) : NULL;
	}

/** Adds an item to a priority list at the tail of the queue for its priority.

@param aLink A pointer to the item - must not be NULL.
*/
EXPORT_C void TPriListBase::Add(TPriListLink* aLink)
	{
	TInt p = aLink->iPriority;
	SDblQueLink* head = iQueue[p];
	if (head)
		{
		// already some at this priority
		aLink->InsertBefore(head);
		}
	else
		{
		// 'create' new list
		iQueue[p] = aLink;
		aLink->iNext = aLink->iPrev = aLink;
		iPresent[p>>5] |= 1u << (p & 0x1f);
		}
	}

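// Worked example of the presence-mask arithmetic above: for p = 37,
// p>>5 = 1 and p&0x1f = 5, so Add() sets bit 5 of iPresent[1] - i.e. bit 37
// of the combined 64-bit mask iPresent64, which is exactly what
// __e32_find_ms1_64() scans in HighestPriority().
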
/** Removes an item from a priority list.

@param aLink A pointer to the item - must not be NULL.
*/
EXPORT_C void TPriListBase::Remove(TPriListLink* aLink)
	{
	if (!aLink->Alone())
		{
		// not the last on this list
		TInt p = aLink->iPriority;
		if (iQueue[p] == aLink)
			iQueue[p] = aLink->iNext;	// headed the queue - advance the head
		aLink->Deque();
		}
	else
		{
		// last item at this priority - clear the queue and the presence bit
		TInt p = aLink->iPriority;
		iQueue[p] = 0;
		iPresent[p>>5] &= ~(1u << (p & 0x1f));
		KILL_LINK(aLink);
		}
	}

/** Changes the priority of an item on a priority list.

@param aLink A pointer to the item to act on - must not be NULL.
@param aNewPriority A new priority for the item.
*/
EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
	{
	if (aLink->iPriority != aNewPriority)
		{
		Remove(aLink);
		aLink->iPriority = TUint8(aNewPriority);
		Add(aLink);
		}
	}

/** Adds an item to a priority list at the head of the queue for its priority.

@param aLink A pointer to the item - must not be NULL.
*/
EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
	{
	TInt p = aLink->iPriority;
	SDblQueLink* head = iQueue[p];
	iQueue[p] = aLink;	// new item becomes the head in all cases
	if (head)
		{
		// already some at this priority
		aLink->InsertBefore(head);
		}
	else
		{
		// 'create' new list
		aLink->iNext = aLink->iPrev = aLink;
		iPresent[p>>5] |= 1u << (p & 0x1f);
		}
	}
#endif

/******************************************************************************
 * Generic IPIs
 ******************************************************************************/

TGenIPIList::TGenIPIList()
	:	iGenIPILock(TSpinLock::EOrderGenericIPIList)
	{
	}

TGenIPIList GenIPIList;

extern void send_generic_ipis(TUint32);

void generic_ipi_isr(TSubScheduler* aS)
	{
	TGenericIPI* ipi = aS->iNextIPI;
	if (!ipi)
		return;
	TUint32 m = aS->iCpuMask;
	SDblQueLink* anchor = &GenIPIList.iA;
	while (ipi != anchor)
		{
		__e32_atomic_and_acq32(&ipi->iCpusIn, ~m);	// mark this CPU as having entered the handler
		(*ipi->iFunc)(ipi);
		TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
		TGenericIPI* n = (TGenericIPI*)ipi->iNext;
		ipi->iCpusOut &= ~m;
		if (ipi->iCpusOut == 0)
			{
			// last CPU out - detach the IPI so WaitCompletion() can return
			ipi->Deque();
			mb();
			ipi->iNext = 0;
			}
		ipi = n;
		// skip entries not addressed to this CPU
		while (ipi!=anchor && !(ipi->iCpusIn & m))
			ipi = (TGenericIPI*)ipi->iNext;
		if (ipi == anchor)
			aS->iNextIPI = 0;
		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
		}
	}

void TGenericIPI::Queue(TGenericIPIFn aFunc, TUint32 aCpuMask)
	{
	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI F=%08x M=%08x", aFunc, aCpuMask));
	iFunc = aFunc;
	TScheduler& s = TheScheduler;
	TInt i;
	TUint32 ipis = 0;
	TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
	if (aCpuMask & 0x80000000u)
		{
		if (aCpuMask==0xffffffffu)
			aCpuMask = s.iActiveCpus2;	// all active CPUs
		else if (aCpuMask==0xfffffffeu)
			aCpuMask = s.iActiveCpus2 &~ SubScheduler().iCpuMask;	// all except this one
		else
			aCpuMask = 0;
		}
	iCpusIn = aCpuMask;
	iCpusOut = aCpuMask;
	if (!aCpuMask)
		{
		GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
		iNext = 0;
		return;
		}
	GenIPIList.Add(this);
	for (i=0; i<s.iNumCpus; ++i)
		{
		if (!(aCpuMask & (1<<i)))
			continue;
		TSubScheduler& ss = *s.iSub[i];
		if (!ss.iNextIPI)
			{
			// no IPI pending on this CPU - point it at this one and interrupt it
			ss.iNextIPI = this;
			ipis |= ss.iCpuMask;
			}
		}
	send_generic_ipis(ipis);
	GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
	__KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI ipis=%08x", ipis));
	}

void TGenericIPI::QueueAll(TGenericIPIFn aFunc)
	{
	Queue(aFunc, 0xffffffffu);
	}

void TGenericIPI::QueueAllOther(TGenericIPIFn aFunc)
	{
	Queue(aFunc, 0xfffffffeu);
	}

// Call from thread or IDFC with interrupts enabled
void TGenericIPI::WaitEntry()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TGenericIPI::WaitEntry");
	// spin until every target CPU has entered the handler (its iCpusIn bit is cleared)
	while (iCpusIn)
		{
		__chill();
		}
	mb();
	}

// Call from thread or IDFC with interrupts enabled
void TGenericIPI::WaitCompletion()
	{
	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TGenericIPI::WaitCompletion");
	volatile TInt* p = (volatile TInt*)&iNext;
	// iNext is zeroed by the last CPU out in generic_ipi_isr()
	while (*p)
		{
		__chill();
		}
	mb();
	}

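// Usage sketch: run a function on every other CPU and wait for all of them to
// finish. MyPerCpuSync() is hypothetical; the IPI object must stay alive until
// WaitCompletion() returns, so a final wait before it goes out of scope is
// the natural pattern.
//
//	void MyPerCpuSync(TGenericIPI*)
//		{
//		// runs in ISR context on each target CPU
//		}
//
//	TGenericIPI ipi;
//	ipi.QueueAllOther(&MyPerCpuSync);	// exclude the current CPU
//	ipi.WaitEntry();					// all targets have entered the handler
//	ipi.WaitCompletion();				// all targets have finished
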
/** Stop all other CPUs

Call with kernel locked
*/
void TStopIPI::StopCPUs()
	{
	iFlag = 0;
	QueueAllOther(&Isr);	// send IPIs to all other CPUs
	WaitEntry();			// wait for other CPUs to reach the ISR
	}

void TStopIPI::ReleaseCPUs()
	{
	iFlag = 1;				// allow other CPUs to proceed
	WaitCompletion();		// wait for them to finish with this IPI
	}

void TStopIPI::Isr(TGenericIPI* a)
	{
	TStopIPI* s = (TStopIPI*)a;
	// spin with interrupts disabled until ReleaseCPUs() sets the flag
	while (!s->iFlag)
		{
		__chill();
		}
	}

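// Usage sketch: briefly stopping all other CPUs around an update that must be
// globally atomic, per the "call with kernel locked" requirement on
// StopCPUs().
//
//	TStopIPI ipi;
//	NKern::Lock();
//	ipi.StopCPUs();			// all other CPUs now spin in Isr()
//	// ... perform the critical update ...
//	ipi.ReleaseCPUs();		// let them go and wait for the handshake
//	NKern::Unlock();
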