// First public contribution.
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\nkern\nkern.cpp
18 // NThreadBase member data
19 #define __INCLUDE_NTHREADBASE_DEFINES__
23 /******************************************************************************
25 ******************************************************************************/
27 /** Checks if the current thread holds this fast mutex
29 @return TRUE if the current thread holds this fast mutex
32 EXPORT_C TBool NFastMutex::HeldByCurrentThread()
34 return iHoldingThread == NCurrentThread();
37 /** Find the fast mutex held by the current thread
39 @return a pointer to the fast mutex held by the current thread
40 @return NULL if the current thread does not hold a fast mutex
42 EXPORT_C NFastMutex* NKern::HeldFastMutex()
44 return TheScheduler.iCurrentThread->iHeldFastMutex;
48 #ifndef __FAST_MUTEX_MACHINE_CODED__
// NOTE(review): extraction fragment - original line numbers are embedded as prefixes and
// intervening lines (braces, the "if (iHoldingThread)" contention branch, #endif) are
// missing; restore from the pristine nkern.cpp before compiling.
49 /** Acquires the fast mutex.
51 This will block until the mutex is available, and causes
52 the thread to enter an implicit critical section until the mutex is released.
54 Generally threads would use NKern::FMWait() which manipulates the kernel lock
57 @pre Kernel must be locked, with lock count 1.
58 @pre The calling thread holds no fast mutexes.
60 @post Kernel is locked, with lock count 1.
61 @post The calling thread holds the mutex.
63 @see NFastMutex::Signal()
66 EXPORT_C void NFastMutex::Wait()
68 __KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait %M",this));
69 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait");
70 NThreadBase* pC=TheScheduler.iCurrentThread;
// The lines below run only when the mutex is already held (contended case):
// the waiter records what it is blocked on, then yields directly to the holder.
74 pC->iWaitFastMutex=this;
75 __KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait: YieldTo %T",iHoldingThread));
76 TheScheduler.YieldTo(iHoldingThread); // returns with kernel unlocked, interrupts disabled
77 TheScheduler.iKernCSLocked = 1; // relock kernel
78 NKern::EnableAllInterrupts();
79 pC->iWaitFastMutex=NULL;
81 pC->iHeldFastMutex=this; // automatically puts thread into critical section
82 #ifdef BTRACE_FAST_MUTEX
83 BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexWait,this);
// NOTE(review): extraction fragment - lines elided (braces, the code that assigns
// iHoldingThread and wakes a contending waiter); do not compile as-is.
89 /** Releases a previously acquired fast mutex.
91 Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
94 @pre The calling thread holds the mutex.
95 @pre Kernel must be locked.
97 @post Kernel is locked.
99 @see NFastMutex::Wait()
100 @see NKern::FMSignal()
102 EXPORT_C void NFastMutex::Signal()
104 __KTRACE_OPT(KNKERN,DEBUGPRINT("FMSignal %M",this));
105 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal");
106 NThreadBase* pC=TheScheduler.iCurrentThread;
107 __ASSERT_WITH_MESSAGE_DEBUG(pC->iHeldFastMutex==this,"The calling thread holds the mutex","NFastMutex::Signal");
108 #ifdef BTRACE_FAST_MUTEX
109 BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,this);
// Clearing iHeldFastMutex takes the thread out of its implicit critical section,
// so any deferred critical-section function must now be run.
112 pC->iHeldFastMutex=NULL;
118 if (pC->iCsFunction && !pC->iCsCount)
// NOTE(review): extraction fragment - the body after the precondition check
// (kernel lock, aMutex->Wait(), kernel unlock) has been elided.
124 /** Acquires a fast mutex.
126 This will block until the mutex is available, and causes
127 the thread to enter an implicit critical section until the mutex is released.
129 @param aMutex The fast mutex to acquire.
131 @post The calling thread holds the mutex.
133 @see NFastMutex::Wait()
134 @see NKern::FMSignal()
136 @pre No fast mutex can be held.
137 @pre Call in a thread context.
138 @pre Kernel must be unlocked
139 @pre interrupts enabled
142 EXPORT_C void NKern::FMWait(NFastMutex* aMutex)
144 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait");
// NOTE(review): extraction fragment - the entire function body has been elided;
// presumably it locks the kernel, calls aMutex->Signal(), then unlocks - verify
// against the pristine nkern.cpp.
151 /** Releases a previously acquired fast mutex.
153 @param aMutex The fast mutex to release.
155 @pre The calling thread holds the mutex.
157 @see NFastMutex::Signal()
160 EXPORT_C void NKern::FMSignal(NFastMutex* aMutex)
// NOTE(review): extraction fragment - braces and the kernel lock/unlock around
// the Wait() call have been elided.
168 /** Acquires the System Lock.
170 This will block until the mutex is available, and causes
171 the thread to enter an implicit critical section until the mutex is released.
173 @post System lock is held.
175 @see NKern::UnlockSystem()
178 @pre No fast mutex can be held.
179 @pre Call in a thread context.
180 @pre Kernel must be unlocked
181 @pre interrupts enabled
183 EXPORT_C void NKern::LockSystem()
185 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::LockSystem");
// The System Lock is simply the scheduler's fast mutex.
187 TheScheduler.iLock.Wait();
// NOTE(review): extraction fragment - braces and precondition-check lines elided.
192 /** Releases the System Lock.
194 @pre System lock must be held.
196 @see NKern::LockSystem()
197 @see NKern::FMSignal()
199 EXPORT_C void NKern::UnlockSystem()
// Releases the scheduler's fast mutex (the System Lock).
202 TheScheduler.iLock.Signal();
// NOTE(review): extraction fragment - the branch structure around the iWaiting
// test (signal/preempt/re-wait sequence) has been elided.
207 /** Temporarily releases a fast mutex if there is contention.
209 If there is another thread attempting to acquire the mutex, the calling
210 thread releases the mutex and then acquires it again.
212 This is more efficient than the equivalent code:
219 @return TRUE if the mutex was relinquished, FALSE if not.
221 @pre The mutex must be held.
223 @post The mutex is held.
225 EXPORT_C TBool NKern::FMFlash(NFastMutex* aM)
227 __ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash");
// iWaiting is set by a contending waiter; it is the cheap contention probe here.
228 TBool w = aM->iWaiting;
233 NKern::PreemptionPoint();
237 #ifdef BTRACE_FAST_MUTEX
240 BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexFlash,aM);
247 /** Temporarily releases the System Lock if there is contention.
250 is another thread attempting to acquire the System lock, the calling
251 thread releases the mutex and then acquires it again.
253 This is more efficient than the equivalent code:
256 NKern::UnlockSystem();
260 Note that this can only allow higher priority threads to use the System
261 lock as lower priority cannot cause contention on a fast mutex.
263 @return TRUE if the system lock was relinquished, FALSE if not.
265 @pre System lock must be held.
267 @post System lock is held.
269 @see NKern::LockSystem()
270 @see NKern::UnlockSystem()
272 EXPORT_C TBool NKern::FlashSystem()
274 return NKern::FMFlash(&TheScheduler.iLock);
279 /******************************************************************************
281 ******************************************************************************/
// NOTE(review): extraction fragment - braces and the "if (!aThread)" guard
// before the current-thread default have been elided.
283 /** Sets the owner of a fast semaphore.
285 @param aThread The thread to own this semaphore. If aThread==0, then the
286 owner is set to the current thread.
288 @pre Kernel must be locked.
289 @pre If changing ownership from one thread to another, then there must be no
290 pending signals or waits.
291 @pre Call either in a thread or an IDFC context.
293 @post Kernel is locked.
295 EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread)
297 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner");
// Default to the current thread when no owner is supplied.
299 aThread = TheScheduler.iCurrentThread;
300 if(iOwningThread && iOwningThread!=aThread)
302 __NK_ASSERT_ALWAYS(!iCount); // Can't change owner if iCount!=0
304 iOwningThread = aThread;
308 #ifndef __FAST_SEM_MACHINE_CODED__
// NOTE(review): extraction fragment - braces and the count decrement/test
// ("if (--iCount<0)") have been elided; the Remove() shown runs only when the
// semaphore becomes unsignalled.
309 /** Waits on a fast semaphore.
311 Decrements the signal count for the semaphore and
312 removes the calling thread from the ready-list if the semaphore becomes
313 unsignalled. Only the thread that owns a fast semaphore can wait on it.
315 Note that this function does not block, it merely updates the NThread state,
316 rescheduling will only occur when the kernel is unlocked. Generally threads
317 would use NKern::FSWait() which manipulates the kernel lock for you.
319 @pre The calling thread must own the semaphore.
320 @pre No fast mutex can be held.
321 @pre Kernel must be locked.
323 @post Kernel is locked.
325 @see NFastSemaphore::Signal()
329 EXPORT_C void NFastSemaphore::Wait()
331 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait");
332 NThreadBase* pC=TheScheduler.iCurrentThread;
333 __ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait");
336 pC->iNState=NThread::EWaitFastSemaphore;
338 TheScheduler.Remove(pC);
// NOTE(review): extraction fragment - the count increment/test has been elided;
// the wake-up lines shown run only when a waiter was present.
344 /** Signals a fast semaphore.
346 Increments the signal count of a fast semaphore by
347 one and releases any waiting thread if the semaphore becomes signalled.
349 Note that a reschedule will not occur before this function returns, this will
350 only take place when the kernel is unlocked. Generally threads
351 would use NKern::FSSignal() which manipulates the kernel lock for you.
353 @pre Kernel must be locked.
354 @pre Call either in a thread or an IDFC context.
356 @post Kernel is locked.
358 @see NFastSemaphore::Wait()
359 @see NKern::FSSignal()
362 EXPORT_C void NFastSemaphore::Signal()
364 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal");
// Wake the owning thread: clear its wait object, then make it ready unless suspended.
367 iOwningThread->iWaitObj=NULL;
368 iOwningThread->CheckSuspendThenReady();
// NOTE(review): extraction fragment - braces and the count update have been elided.
373 /** Signals a fast semaphore multiple times.
375 @pre Kernel must be locked.
376 @pre Call either in a thread or an IDFC context.
378 @post Kernel is locked.
382 EXPORT_C void NFastSemaphore::SignalN(TInt aCount)
384 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN");
385 __NK_ASSERT_DEBUG(aCount>=0);
// iCount<0 means the owner is currently waiting; a positive signal releases it.
386 if (aCount>0 && iCount<0)
388 iOwningThread->iWaitObj=NULL;
389 iOwningThread->CheckSuspendThenReady();
// NOTE(review): extraction fragment - braces and the test/reset of iCount elided;
// the wake-up lines shown run only if the owner was waiting.
395 /** Resets a fast semaphore.
397 @pre Kernel must be locked.
398 @pre Call either in a thread or an IDFC context.
400 @post Kernel is locked.
404 EXPORT_C void NFastSemaphore::Reset()
406 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset");
409 iOwningThread->iWaitObj=NULL;
410 iOwningThread->CheckSuspendThenReady();
// NOTE(review): extraction fragment - braces and the count restoration elided.
416 /** Cancels a wait on a fast semaphore.
418 @pre Kernel must be locked.
419 @pre Call either in a thread or an IDFC context.
421 @post Kernel is locked.
425 void NFastSemaphore::WaitCancel()
427 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::WaitCancel");
// Revert the wait as if it never happened, then make the owner ready again.
429 iOwningThread->iWaitObj=NULL;
430 iOwningThread->CheckSuspendThenReady();
// NOTE(review): extraction fragment - braces and the kernel lock/unlock around
// the semaphore wait have been elided.
434 /** Waits for a signal on the current thread's I/O semaphore.
436 @pre No fast mutex can be held.
437 @pre Call in a thread context.
438 @pre Kernel must be unlocked
439 @pre interrupts enabled
441 EXPORT_C void NKern::WaitForAnyRequest()
443 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest");
444 __KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR"));
445 NThreadBase* pC=TheScheduler.iCurrentThread;
// Each thread has a built-in request semaphore used for asynchronous request completion.
447 pC->iRequestSemaphore.Wait();
// NOTE(review): extraction fragment - braces and the kernel lock/unlock around
// SetOwner() have been elided.
453 /** Sets the owner of a fast semaphore.
455 @param aSem The semaphore to change ownership of.
456 @param aThread The thread to own this semaphore. If aThread==0, then the
457 owner is set to the current thread.
459 @pre If changing ownership from one thread to another, then there must be no
460 pending signals or waits.
462 EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread)
464 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread));
466 aSem->SetOwner(aThread);
// NOTE(review): extraction fragment - the body after the trace line (kernel
// lock, aSem->Wait(), kernel unlock) has been elided.
470 /** Waits on a fast semaphore.
472 Decrements the signal count for the semaphore
473 and waits for a signal if the semaphore becomes unsignalled. Only the
474 thread that owns a fast semaphore can wait on it.
476 @param aSem The semaphore to wait on.
478 @pre The calling thread must own the semaphore.
479 @pre No fast mutex can be held.
481 @see NFastSemaphore::Wait()
483 EXPORT_C void NKern::FSWait(NFastSemaphore* aSem)
485 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSWait %m",aSem));
// NOTE(review): extraction fragment - the body after the trace line (kernel
// lock, aSem->Signal(), kernel unlock) has been elided.
492 /** Signals a fast semaphore.
494 Increments the signal count of a fast semaphore
495 by one and releases any waiting thread if the semaphore becomes signalled.
497 @param aSem The semaphore to signal.
501 @pre Interrupts must be enabled.
502 @pre Do not call from an ISR
504 EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem)
506 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)");
507 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m",aSem));
// NOTE(review): extraction fragment - the "if (!aMutex)" guard before the
// System Lock default and the signal/release body have been elided.
514 /** Atomically signals a fast semaphore and releases a fast mutex.
516 Rescheduling only occurs after both synchronisation operations are complete.
518 @param aSem The semaphore to signal.
519 @param aMutex The mutex to release. If NULL, the System Lock is released
521 @pre The calling thread must hold the mutex.
523 @see NKern::FMSignal()
525 EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex)
// Defaults to the System Lock (the scheduler's fast mutex) when no mutex is given.
528 aMutex=&TheScheduler.iLock;
529 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m +FM %M",aSem,aMutex));
// NOTE(review): extraction fragment - braces and the kernel lock/unlock around
// SignalN() have been elided.
537 /** Signals a fast semaphore multiple times.
539 Increments the signal count of a
540 fast semaphore by aCount and releases any waiting thread if the semaphore
543 @param aSem The semaphore to signal.
544 @param aCount The number of times to signal the semaphore.
548 @pre Interrupts must be enabled.
549 @pre Do not call from an ISR
551 EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount)
553 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)");
554 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d",aSem,aCount));
556 aSem->SignalN(aCount);
// NOTE(review): extraction fragment - the "if (!aMutex)" guard and the mutex
// release after SignalN() have been elided.
561 /** Atomically signals a fast semaphore multiple times and releases a fast mutex.
563 Rescheduling only occurs after both synchronisation operations are complete.
565 @param aSem The semaphore to signal.
566 @param aCount The number of times to signal the semaphore.
567 @param aMutex The mutex to release. If NULL, the System Lock is released.
569 @pre The calling thread must hold the mutex.
571 @see NKern::FMSignal()
573 EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex)
// Defaults to the System Lock (the scheduler's fast mutex) when no mutex is given.
576 aMutex=&TheScheduler.iLock;
577 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d + FM %M",aSem,aCount,aMutex));
579 aSem->SignalN(aCount);
585 /******************************************************************************
587 ******************************************************************************/
589 #ifndef __SCHEDULER_MACHINE_CODED__
// NOTE(review): extraction fragment - braces and the branches after the
// iSuspendCount test (Ready() vs. marking the thread suspended) have been elided.
590 /** Makes a nanothread ready provided that it is not explicitly suspended.
592 For use by RTOS personality layers.
594 @pre Kernel must be locked.
595 @pre Call either in a thread or an IDFC context.
597 @post Kernel is locked.
599 EXPORT_C void NThreadBase::CheckSuspendThenReady()
601 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::CheckSuspendThenReady");
// Only a thread with no outstanding suspensions may actually become ready.
602 if (iSuspendCount==0)
// NOTE(review): extraction fragment - braces and the DoReady() path after the
// crazy-scheduler branch have been elided.
608 /** Makes a nanothread ready.
610 For use by RTOS personality layers.
612 @pre Kernel must be locked.
613 @pre Call either in a thread or an IDFC context.
614 @pre The thread being made ready must not be explicitly suspended
616 @post Kernel is locked.
618 EXPORT_C void NThreadBase::Ready()
621 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Ready");
622 __ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::Ready");
// "Crazy scheduler" debug mode: artificially delay readied threads via a queue.
624 if (DEBUGNUM(KCRAZYSCHEDDELAY) && iPriority && TheTimerQ.iMsCount)
626 // Delay this thread, unless it's already on the delayed queue
627 if ((i_ThrdAttr & KThreadAttDelayed) == 0)
629 i_ThrdAttr |= KThreadAttDelayed;
630 TheScheduler.iDelayedQ.Add(this);
635 // Delayed scheduler off
636 // or idle thread, or the tick hasn't started yet
// NOTE(review): extraction fragment - the ready-list insertion and the lines
// that set s/p before the preemption test have been elided.
644 void NThreadBase::DoReady()
646 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::DoReady");
647 __ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::DoReady");
649 TScheduler& s=TheScheduler;
651 // __KTRACE_OPT(KSCHED,Kern::Printf("Ready(%O), priority %d status %d",this,p,iStatus));
656 if (!(s>p)) // s>p <=> highest ready priority > our priority so no preemption
658 // if no other thread at this priority or first thread at this priority has used its timeslice, reschedule
659 // note iNext points to first thread at this priority since we got added to the end
660 if (iNext==this || ((NThreadBase*)iNext)->iTime==0)
// NOTE(review): extraction fragment - the read/clear of iCsFunction into f and
// the positive-value (suspend) branch have been elided.
666 void NThreadBase::DoCsFunction()
668 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::DoCsFunction %T %d",this,iCsFunction));
673 // suspend this thread f times
677 if (f==ECSExitPending)
679 // We need to exit now
680 Exit(); // this won't return
// Any other (unrecognised) value is delegated to the RTOS personality layer.
682 UnknownState(ELeaveCS,f); // call into RTOS personality
// NOTE(review): extraction fragment - braces, the switch(iNState) cases between
// the Remove() and the UnknownState() fallback, and several guard lines elided.
686 /** Suspends a nanothread the specified number of times.
688 For use by RTOS personality layers.
689 Do not use this function directly on a Symbian OS thread.
690 Since the kernel is locked on entry, any reschedule will be deferred until
692 The suspension will be deferred if the target thread is currently in a
693 critical section; in this case the suspension will take effect when it exits
694 the critical section.
695 The thread's unknown state handler will be invoked with function ESuspend and
696 parameter aCount if the current NState is not recognised and it is not in a
699 @param aCount = the number of times to suspend.
700 @return TRUE, if the suspension has taken immediate effect;
701 FALSE, if the thread is in a critical section or is already suspended.
703 @pre Kernel must be locked.
704 @pre Call in a thread context.
706 @post Kernel is locked.
708 EXPORT_C TBool NThreadBase::Suspend(TInt aCount)
710 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend");
711 // If thread is executing a critical section, we must defer the suspend
713 return FALSE; // already dead so suspension is a no-op
714 if (iCsCount || iHeldFastMutex)
716 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (CSF %d) %d",this,iCsFunction,aCount));
717 if (iCsFunction>=0) // -ve means thread is about to exit
719 iCsFunction+=aCount; // so thread will suspend itself when it leaves the critical section
// Asking the holder to relinquish: iWaiting makes it reschedule when it signals the mutex.
720 if (iHeldFastMutex && iCsCount==0)
721 iHeldFastMutex->iWaiting=1;
726 // thread not in critical section, so suspend it
727 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (NState %d) %d",this,iNState,aCount));
731 TheScheduler.Remove(this);
734 case EWaitFastSemaphore:
741 UnknownState(ESuspend,aCount);
// iSuspendCount counts downwards; 0 means not suspended.
744 TInt old_suspend=iSuspendCount;
745 iSuspendCount-=aCount;
746 return (old_suspend==0); // return TRUE if thread has changed from not-suspended to suspended.
// NOTE(review): extraction fragment - braces, the dead-thread guard, and most
// of the switch(iNState) cases have been elided.
750 /** Resumes a nanothread, cancelling one suspension.
752 For use by RTOS personality layers.
753 Do not use this function directly on a Symbian OS thread.
754 Since the kernel is locked on entry, any reschedule will be deferred until
756 If the target thread is currently in a critical section this will simply
757 cancel one deferred suspension.
758 The thread's unknown state handler will be invoked with function EResume if
759 the current NState is not recognised and it is not in a critical section.
761 @return TRUE, if the resumption has taken immediate effect;
762 FALSE, if the thread is in a critical section or is still suspended.
764 @pre Kernel must be locked.
765 @pre Call either in a thread or an IDFC context.
767 @post Kernel must be locked.
769 EXPORT_C TBool NThreadBase::Resume()
771 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume");
772 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Resume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction));
776 // If thread is in critical section, just cancel deferred suspends
777 if (iCsCount || iHeldFastMutex)
780 --iCsFunction; // one less deferred suspension
// iSuspendCount counts downwards; reaching 0 means the last suspension was cancelled.
783 if (iSuspendCount<0 && ++iSuspendCount==0)
790 case EWaitFastSemaphore:
796 UnknownState(EResume,0);
799 return TRUE; // thread has changed from suspended to not-suspended
801 return FALSE; // still suspended or not initially suspended so no higher level action required
// NOTE(review): extraction fragment - braces, the suspended test, the switch
// cases, and the return statements have been elided.
805 /** Resumes a nanothread, cancelling all outstanding suspensions.
807 For use by RTOS personality layers.
808 Do not use this function directly on a Symbian OS thread.
809 Since the kernel is locked on entry, any reschedule will be deferred until
811 If the target thread is currently in a critical section this will simply
812 cancel all deferred suspensions.
813 The thread's unknown state handler will be invoked with function EForceResume
814 if the current NState is not recognised and it is not in a critical section.
816 @return TRUE, if the resumption has taken immediate effect;
817 FALSE, if the thread is in a critical section.
819 @pre Kernel must be locked.
820 @pre Call either in a thread or an IDFC context.
822 @post Kernel is locked.
824 EXPORT_C TBool NThreadBase::ForceResume()
826 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume");
827 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::ForceResume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction));
831 // If thread is in critical section, just cancel deferred suspends
832 if (iCsCount || iHeldFastMutex)
835 iCsFunction=0; // cancel all deferred suspensions
846 case EWaitFastSemaphore:
853 UnknownState(EForceResume,0);
// NOTE(review): extraction fragment - braces, the switch header, several state
// cases, and the timer-cancel path have been elided.
861 /** Releases a waiting nanokernel thread.
863 For use by RTOS personality layers.
864 Do not use this function directly on a Symbian OS thread.
865 This function should make the thread ready (provided it is not explicitly
866 suspended) and cancel any wait timeout. It should also remove it from any
868 If aReturnCode is nonnegative it indicates normal completion of the wait.
869 If aReturnCode is negative it indicates early/abnormal completion of the
870 wait and so any wait object should be reverted as if the wait had never
871 occurred (eg semaphore count should be incremented as this thread has not
872 actually acquired the semaphore).
873 The thread's unknown state handler will be invoked with function ERelease
874 and parameter aReturnCode if the current NState is not recognised.
876 @param aReturnCode The reason code for release.
878 @pre Kernel must be locked.
879 @pre Call either in a thread or an IDFC context.
881 @post Kernel is locked.
883 EXPORT_C void NThreadBase::Release(TInt aReturnCode)
885 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release");
886 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Release %T, state %d retcode %d",this,iNState,aReturnCode));
893 // don't release explicit suspensions
895 case EWaitFastSemaphore:
// Abnormal completion must undo the semaphore wait (see function comment above).
896 if (aReturnCode<0 && iWaitObj)
897 ((NFastSemaphore*)iWaitObj)->WaitCancel();
902 CheckSuspendThenReady();
905 UnknownState(ERelease,aReturnCode);
908 if (iTimer.iUserFlags)
910 if (iTimer.iState == NTimer::EIdle)
912 // Potential race condition - timer must have completed but expiry
913 // handler has not yet run. Signal to the handler that it should do
914 // nothing by flipping the bottom bit of iTimer.iPtr
915 // This condition cannot possibly recur until the expiry handler has
916 // run since all expiry handlers run in DfcThread1.
917 TLinAddr& x = *(TLinAddr*)&iTimer.iPtr;
921 iTimer.iUserFlags = FALSE;
924 iReturnValue=aReturnCode;
928 /** Signals a nanokernel thread's request semaphore.
930 This can also be used on Symbian OS threads.
932 @pre Kernel must be locked.
933 @pre Call either in a thread or an IDFC context.
935 @post Kernel is locked.
937 EXPORT_C void NThreadBase::RequestSignal()
939 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal");
940 iRequestSemaphore.Signal();
// NOTE(review): extraction fragment - braces, the NKern::Lock()/Unlock() calls,
// the switch header, and several state cases have been elided.
943 void NThreadBase::TimerExpired(TAny* aPtr)
// The cookie packs the thread pointer with flag bits in the two low bits
// (threads are at least 4-byte aligned); bit 0 is flipped by a racing cancel.
945 TLinAddr cookie = (TLinAddr)aPtr;
946 NThread* pT = (NThread*)(cookie &~ 3);
947 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::TimerExpired %T, state %d",pT,pT->iNState));
948 NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler;
950 if (pT->iNState<ENumNStates && pT->iNState!=EBlocked)
954 // Use higher level timeout handler
956 (*th)(pT, ETimeoutPreamble);
957 TInt param = ETimeoutPostamble;
959 TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr;
960 if ((cookie ^ current_cookie) & 1)
962 // The timer was cancelled just after expiring but before this function
963 // managed to call NKern::Lock(), so it's spurious
964 param = ETimeoutSpurious;
967 pT->iTimer.iUserFlags = FALSE;
972 TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr;
973 if ((cookie ^ current_cookie) & 1)
975 // The timer was cancelled just after expiring but before this function
976 // managed to call NKern::Lock(), so just return without doing anything.
980 pT->iTimer.iUserFlags = FALSE;
988 case EWaitFastSemaphore:
989 ((NFastSemaphore*)pT->iWaitObj)->WaitCancel();
994 pT->CheckSuspendThenReady();
997 pT->UnknownState(ETimeout,0);
1001 pT->iReturnValue=KErrTimedOut;
// NOTE(review): extraction fragment - braces, early returns, the ready-state
// branch, and most switch cases have been elided.
1006 /** Changes the priority of a nanokernel thread.
1008 For use by RTOS personality layers.
1009 Do not use this function directly on a Symbian OS thread.
1011 The thread's unknown state handler will be invoked with function EChangePriority
1012 and parameter newp if the current NState is not recognised and the new priority
1013 is not equal to the original priority.
1015 @param newp The new nanokernel priority (0 <= newp < KNumPriorities).
1017 @pre Kernel must be locked.
1018 @pre Call in a thread context.
1020 @post Kernel is locked.
1022 EXPORT_C void NThreadBase::SetPriority(TInt newp)
1024 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");
1025 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::SetPriority %T %d->%d, state %d",this,iPriority,newp,iNState));
1027 // When the crazy scheduler is active, refuse to set any priority higher than 1
1028 if (KCrazySchedulerEnabled() && newp>1)
1031 if (newp==iPriority)
1033 #ifdef BTRACE_THREAD_PRIORITY
1034 BTrace8(BTrace::EThreadPriority,BTrace::ENThreadPriority,this,newp);
1040 TInt oldp=iPriority;
1041 TheScheduler.ChangePriority(this,newp);
1042 NThreadBase* pC=TheScheduler.iCurrentThread;
1045 if (newp<oldp && (TheScheduler>newp || !TPriListLink::Alone())) // can't have scheduler<newp
1050 TInt cp=pC->iPriority;
1053 else if (newp==cp && pC->iTime==0)
1055 if (pC->iHeldFastMutex)
1056 pC->iHeldFastMutex->iWaiting=1; // don't round-robin now, wait until fast mutex released
1064 case EWaitFastSemaphore:
1069 iPriority=TUint8(newp);
1072 UnknownState(EChangePriority,newp);
// NOTE(review): extraction fragment - braces, assertions between the trace
// lines, the kernel unlock/relock around the exit handler, the DFC enqueue,
// and #endif have been elided.
1077 void NThreadBase::Exit()
1079 // The current thread is exiting
1080 // Enter with kernel locked, don't return
1081 __NK_ASSERT_DEBUG(this==TheScheduler.iCurrentThread);
1085 TInt threadCS=iCsCount;
1086 TInt kernCS=TheScheduler.iKernCSLocked;
// ECSExitInProgress blocks any further suspend/kill of this thread.
1088 iCsFunction=ECSExitInProgress;
1090 __KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount()));
1091 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Exit %T, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS));
1099 NThreadExitHandler xh = iHandlers->iExitHandler;
1101 pD=(*xh)((NThread*)this); // call exit handler
1106 TheScheduler.Remove(this);
1108 #ifdef BTRACE_THREAD_IDENTIFICATION
1109 BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this);
1111 __NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress);
1112 TScheduler::Reschedule(); // this won't return
// NOTE(review): extraction fragment - braces, the "this==pC" self-kill branch,
// and several guard/return lines have been elided.
1117 /** Kills a nanokernel thread.
1119 For use by RTOS personality layers.
1120 Do not use this function directly on a Symbian OS thread.
1122 When acting on the calling thread, causes the calling thread to exit.
1124 When acting on another thread, causes that thread to exit unless it is
1125 currently in a critical section. In this case the thread is marked as
1126 "exit pending" and will exit as soon as it leaves the critical section.
1128 In either case the exiting thread first invokes its exit handler (if it
1129 exists). The handler runs with preemption enabled and with the thread in a
1130 critical section so that it may not be suspended or killed again. The
1131 handler may return a pointer to a TDfc, which will be enqueued just before
1132 the thread finally terminates (after the kernel has been relocked). This DFC
1133 will therefore execute once the NThread has been safely removed from the
1134 scheduler and is intended to be used to cleanup the NThread object and any
1135 associated personality layer resources.
1137 @pre Kernel must be locked.
1138 @pre Call in a thread context.
1139 @pre If acting on calling thread, calling thread must not be in a
1140 critical section; if it is the kernel will fault. Also, the kernel
1141 must be locked exactly once (iKernCSLocked = 1).
1143 @post Kernel is locked, if not acting on calling thread.
1144 @post Does not return if it acts on the calling thread.
1146 EXPORT_C void NThreadBase::Kill()
1149 // Enter with kernel locked
1150 // Exit with kernel locked if not current thread, otherwise does not return
1151 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill");
1152 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T, state %d CSC %d HeldFM %M",this,iNState,iCsCount,iHeldFastMutex));
1153 OnKill(); // platform-specific hook
1154 NThreadBase* pC=TheScheduler.iCurrentThread;
1157 if (iCsFunction==ECSExitInProgress)
1159 Exit(); // this will not return
// Target is in a critical section (or holds a fast mutex): defer the kill.
1161 if (iCsCount || iHeldFastMutex)
1163 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T deferred",this));
1165 return; // thread is already exiting
1166 iCsFunction=ECSExitPending; // zap any suspensions pending
1167 if (iHeldFastMutex && iCsCount==0)
1168 iHeldFastMutex->iWaiting=1;
1172 // thread is not in critical section
1173 // make the thread divert to Exit() when it next runs
1174 __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill diverting %T",this));
1175 Release(KErrDied); // cancel any waits on semaphores etc.
1176 ForceResume(); // release any suspensions
1177 iWaitFastMutex=NULL; // if thread was waiting for a fast mutex it needn't bother
1178 iCsCount=1; // stop anyone suspending the thread
1179 iCsFunction=ECSExitPending;
1180 ForceExit(); // get thread to call Exit when it is next scheduled
// NOTE(review): extraction fragment - braces, the trace line, and the kernel
// lock/unlock around Suspend() have been elided.
1184 /** Suspends the execution of a thread.
1186 This function is intended to be used by the EPOC layer and personality layers.
1187 Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend().
1189 If the thread is in a critical section or holds a fast mutex, the suspension will
1190 be deferred until the thread leaves the critical section or signals the fast mutex.
1191 Otherwise the thread will be suspended with immediate effect. If the thread is
1192 running, the execution of the thread will be suspended and a reschedule will occur.
1194 @param aThread Thread to be suspended.
1195 @param aCount Number of times to suspend this thread.
1197 @return TRUE, if the thread had changed the state from non-suspended to suspended;
1200 @see Kern::ThreadSuspend()
1202 EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount)
1205 TBool r=aThread->Suspend(aCount);
// NOTE(review): extraction fragment - braces, the trace line, and the kernel
// lock/unlock around Resume() have been elided.
1211 /** Resumes the execution of a thread.
1213 This function is intended to be used by the EPOC layer and personality layers.
1214 Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1216 This function resumes the thread once. If the thread was suspended more than once
1217 the thread will remain suspended.
1218 If the thread is in a critical section, this function will decrease the number of
1219 deferred suspensions.
1221 @param aThread Thread to be resumed.
1223 @return TRUE, if the thread had changed the state from suspended to non-suspended;
1226 @see Kern::ThreadResume()
1228 EXPORT_C TBool NKern::ThreadResume(NThread* aThread)
1231 TBool r=aThread->Resume();
// NOTE(review): extraction fragment - the "if (!aMutex)" guard, kernel locking,
// and the mutex signal after Resume() have been elided.
1237 /** Resumes the execution of a thread and signals a mutex.
1239 This function is intended to be used by the EPOC layer and personality layers.
1240 Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1242 This function resumes the thread once. If the thread was suspended more than once
1243 the thread will remain suspended.
1244 If the thread is in a critical section, this function will decrease the number of
1245 deferred suspensions.
1247 @param aThread Thread to be resumed.
1248 @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
1250 @return TRUE, if the thread had changed the state from suspended to non-suspended;
1253 @see Kern::ThreadResume()
1255 EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex)
// Defaults to the System Lock (the scheduler's fast mutex) when no mutex is given.
1258 aMutex=&TheScheduler.iLock;
1259 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadResume %T + FM %M",aThread,aMutex));
1261 TBool r=aThread->Resume();
1268 /** Forces the execution of a thread to be resumed.
1270 This function is intended to be used by the EPOC layer and personality layers.
1271 Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1273 This function cancels all suspensions on a thread.
1275 @param aThread Thread to be resumed.
1277 @return TRUE, if the thread had changed the state from suspended to non-suspended;
1280 @see Kern::ThreadResume()
1282 EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread)
1285 TBool r=aThread->ForceResume();
1291 /** Forces the execution of a thread to be resumed and signals a mutex.
1293 This function is intended to be used by the EPOC layer and personality layers.
1294 Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1296 This function cancels all suspensions on a thread.
1298 @param aThread Thread to be resumed.
1299 @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
1301 @return TRUE, if the thread had changed the state from suspended to non-suspended;
1304 @see Kern::ThreadResume()
1306 EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex)
1309 aMutex=&TheScheduler.iLock;
1310 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadForceResume %T + FM %M",aThread,aMutex));
1312 TBool r=aThread->ForceResume();
1319 /** Awakens a nanothread.
1321 This function is used to implement synchronisation primitives in the EPOC
1322 kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not
1323 intended to be used directly by device drivers.
1325 If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is
1326 blocked in a call to NKern::Block, it is awakened and put back on the ready
1327 list. Otherwise, the thread state is unchanged. In particular, nothing
1328 happens if the nanothread has been explicitly suspended.
1330 @param aThread Thread to release.
1331 @param aReturnValue Value returned by NKern::Block if the thread was blocked.
1335 @pre Interrupts must be enabled.
1336 @pre Do not call from an ISR
1338 EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue)
1340 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)");
1342 aThread->Release(aReturnValue);
1347 /** Atomically awakens a nanothread and signals a fast mutex.
1349 This function is used to implement synchronisation primitives in the EPOC
1350 kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not
1351 intended to be used directly by device drivers.
1353 @param aThread Thread to release.
1354 @param aReturnValue Value returned by NKern::Block if the thread was blocked.
1355 @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
1357 @see NKern::ThreadRelease(NThread*, TInt)
1360 @pre Call in a thread context.
1361 @pre Interrupts must be enabled.
1362 @pre Kernel must be unlocked.
1363 @pre Specified mutex must be held
1365 EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex)
1367 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)");
1369 aMutex=&TheScheduler.iLock;
1370 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadRelease %T ret %d + FM %M",aThread,aReturnValue,aMutex));
1372 aThread->Release(aReturnValue);
1378 /** Changes the priority of a thread.
1380 This function is intended to be used by the EPOC layer and personality layers.
1381 Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
1383 @param aThread Thread to receive the new priority.
1384 @param aPriority New priority for aThread.
1386 @see Kern::SetThreadPriority()
1388 EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority)
1391 aThread->SetPriority(aPriority);
1396 /** Changes the priority of a thread and signals a mutex.
1398 This function is intended to be used by the EPOC layer and personality layers.
1399 Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
1401 @param aThread Thread to receive the new priority.
1402 @param aPriority New priority for aThread.
1403 @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
1405 @see Kern::SetThreadPriority()
1407 EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex)
1410 aMutex=&TheScheduler.iLock;
1411 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadSetPriority %T->%d + FM %M",aThread,aPriority,aMutex));
1413 aThread->SetPriority(aPriority);
1418 #ifndef __SCHEDULER_MACHINE_CODED__
1420 /** Signals the request semaphore of a nanothread.
1422 This function is intended to be used by the EPOC layer and personality
1423 layers. Device drivers should use Kern::RequestComplete instead.
1425 @param aThread Nanothread to signal. Must be non NULL.
1427 @see Kern::RequestComplete()
1429 @pre Interrupts must be enabled.
1430 @pre Do not call from an ISR
1432 EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread)
1434 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRequestSignal(NThread*)");
1436 aThread->iRequestSemaphore.Signal();
1441 /** Atomically signals the request semaphore of a nanothread and a fast mutex.
1443 This function is intended to be used by the EPOC layer and personality
1444 layers. Device drivers should use Kern::RequestComplete instead.
1446 @param aThread Nanothread to signal. Must be non NULL.
1447 @param aMutex Fast mutex to signal. If NULL, the system lock is signaled.
1449 @see Kern::RequestComplete()
1451 @pre Call in a thread context.
1452 @pre Interrupts must be enabled.
1453 @pre Kernel must be unlocked.
1454 @pre Specified mutex must be held
1456 EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex)
1458 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)");
1460 aMutex=&TheScheduler.iLock;
1462 aThread->iRequestSemaphore.Signal();
1469 /** Signals the request semaphore of a nanothread several times.
1471 This function is intended to be used by the EPOC layer and personality
1472 layers. Device drivers should use Kern::RequestComplete instead.
1474 @param aThread Nanothread to signal. If NULL, the current thread is signaled.
1475 @param aCount Number of times the request semaphore must be signaled.
1479 @see Kern::RequestComplete()
1481 EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount)
1483 __ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal");
1485 aThread=(NThread*)TheScheduler.iCurrentThread;
1487 aThread->iRequestSemaphore.SignalN(aCount);
1492 /** Kills a nanothread.
1494 This function is intended to be used by the EPOC layer and personality layers.
1495 Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
1497 This function does not return if the current thread is killed.
1498 This function is asynchronous (i.e. the thread to kill may still be alive when the call returns).
1500 @param aThread Thread to kill. Must be non NULL.
1502 @pre If acting on calling thread, calling thread must not be in a
1504 @pre Thread must not already be exiting.
1506 @see Kern::ThreadKill()
1508 EXPORT_C void NKern::ThreadKill(NThread* aThread)
1516 /** Atomically kills a nanothread and signals a fast mutex.
1518 This function is intended to be used by the EPOC layer and personality layers.
1519 Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
1521 @param aThread Thread to kill. Must be non NULL.
1522 @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
1524 @pre If acting on calling thread, calling thread must not be in a
1526 @pre Thread must not already be exiting.
1528 @see NKern::ThreadKill(NThread*)
1530 EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex)
// NOTE(review): this listing is elided — the original's braces, kernel
// lock/unlock and the "killing another thread" branch are not visible
// between the numbered lines below; only the self-kill path survives.
// A NULL mutex selects the system lock (see the API documentation above).
1533 aMutex=&TheScheduler.iLock;
1534 NThreadBase* pC=TheScheduler.iCurrentThread;
// Self-kill path: the caller must not be in a critical section, otherwise
// the exit would have to be deferred rather than immediate.
1538 __NK_ASSERT_DEBUG(pC->iCsCount==0); // Make sure thread isn't in critical section
// Mark the thread so that releasing the fast mutex completes the exit.
1539 aThread->iCsFunction=NThreadBase::ECSExitPending;
1541 aMutex->Signal(); // this will make us exit
1542 FAULT(); // should never get here
1553 /** Enters thread critical section.
1555 This function can safely be used in device drivers.
1557 The current thread will enter its critical section. While in critical section
1558 the thread cannot be suspended or killed. Any suspension or kill will be deferred
1559 until the thread leaves the critical section.
1560 Some API explicitly require threads to be in critical section before calling that
1562 Only User threads need to call this function as the concept of thread critical
1563 section applies to User threads only.
1565 @pre Call in a thread context.
1566 @pre Kernel must be unlocked.
1568 EXPORT_C void NKern::ThreadEnterCS()
1570 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS");
1571 NThreadBase* pC=TheScheduler.iCurrentThread;
1572 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadEnterCS %T",pC));
1573 __NK_ASSERT_DEBUG(pC->iCsCount>=0);
1578 NThread* NKern::_ThreadEnterCS()
1580 NThread* pC = (NThread*)TheScheduler.iCurrentThread;
1581 __NK_ASSERT_DEBUG(pC->iCsCount>=0);
1587 /** Leaves thread critical section.
1589 This function can safely be used in device drivers.
1591 The current thread will leave its critical section. If the thread was suspended/killed
1592 while in critical section, the thread will be suspended/killed after leaving the
1593 critical section by calling this function.
1594 Only User threads need to call this function as the concept of thread critical
1595 section applies to User threads only.
1597 @pre Call in a thread context.
1598 @pre Kernel must be unlocked.
1601 EXPORT_C void NKern::ThreadLeaveCS()
1603 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS");
1604 NThreadBase* pC=TheScheduler.iCurrentThread;
1606 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadLeaveCS %T",pC));
1607 __NK_ASSERT_DEBUG(pC->iCsCount>0);
1608 if (--pC->iCsCount==0 && pC->iCsFunction!=0)
1610 if (pC->iHeldFastMutex)
1611 pC->iHeldFastMutex->iWaiting=1;
1618 void NKern::_ThreadLeaveCS()
1620 NThreadBase* pC=TheScheduler.iCurrentThread;
1622 __NK_ASSERT_DEBUG(pC->iCsCount>0);
1623 if (--pC->iCsCount==0 && pC->iCsFunction!=0)
1625 if (pC->iHeldFastMutex)
1626 pC->iHeldFastMutex->iWaiting=1;
1633 /** Freeze the CPU of the current thread
1635 After this the current thread will not migrate to another processor
1637 On uniprocessor builds does nothing and returns 0
1639 @return A cookie to be passed to NKern::EndFreezeCpu() to allow nesting
1641 EXPORT_C TInt NKern::FreezeCpu()
1643 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu");
1648 /** Unfreeze the current thread's CPU
1650 After this the current thread will again be eligible to migrate to another processor
1652 On uniprocessor builds does nothing
1654 @param aCookie the value returned by NKern::FreezeCpu()
1656 EXPORT_C void NKern::EndFreezeCpu(TInt /*aCookie*/)
1658 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu");
1662 /** Change the CPU affinity of a thread
1664 On uniprocessor builds does nothing
1666 @pre Call in a thread context.
1668 @param aAffinity The new CPU affinity mask
1669 @return The old affinity mask
1671 EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread*, TUint32)
1673 return 0; // lock to processor 0
1677 /** Modify a thread's timeslice
1679 @pre Call in a thread context.
1681 @param aTimeslice The new timeslice value
1683 EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice)
1686 if (aThread->iTimeslice == aThread->iTime || aTimeslice<0)
1687 aThread->iTime = aTimeslice;
1688 aThread->iTimeslice = aTimeslice;
1693 /** Blocks current nanothread.
1695 This function is used to implement synchronisation primitives in the EPOC
1696 layer and in personality layers. It is not intended to be used directly by
1699 @param aTimeout If greater than 0, the nanothread will be blocked for at most
1700 aTimeout microseconds.
1701 @param aMode Bitmask whose possible values are documented in TBlockMode.
1702 @param aMutex Fast mutex to operate on. If NULL, the system lock is used.
1704 @see NKern::ThreadRelease()
1707 @pre Call in a thread context.
1708 @pre Interrupts must be enabled.
1709 @pre Kernel must be unlocked.
1710 @pre Specified mutex must be held
1712 EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex)
// NOTE(review): this listing is elided — braces, the kernel lock/unlock
// pairing, the NULL-mutex guard body and the EClaim handling are missing
// between the numbered lines; comments below describe the visible logic only.
1714 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)");
// A NULL mutex selects the system lock (see the API documentation above).
1716 aMutex=&TheScheduler.iLock;
1717 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex));
1718 NThreadBase* pC=TheScheduler.iCurrentThread;
// Optionally enter a thread critical section before blocking (TBlockMode flag).
1721 if (aMode & EEnterCS)
// Optionally release the fast mutex before blocking (TBlockMode flag).
1723 if (aMode & ERelease)
1725 #ifdef BTRACE_FAST_MUTEX
1726 BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,aMutex);
// Manual fast-mutex release: clear the holder, remember whether anyone
// was waiting, and detach the mutex from the current thread.
1728 aMutex->iHoldingThread=NULL;
1729 TBool w=aMutex->iWaiting;
1731 pC->iHeldFastMutex=NULL;
// A deferred suspend/kill may now run, provided no critical section is active.
1732 if (w && !pC->iCsCount && pC->iCsFunction)
// Arm the wait timer for finite timeouts (zero presumably means wait forever —
// consistent with the NanoBlock documentation elsewhere in this file).
1738 pC->iTimer.iUserFlags = TRUE;
1739 pC->iTimer.OneShot(aTimeout,TRUE);
// Take the thread off the ready list and mark it blocked.
1741 if (pC->iNState==NThread::EReady)
1742 TheScheduler.Remove(pC);
1743 pC->iNState=NThread::EBlocked;
// The wake-up value set by NKern::ThreadRelease() is returned to the caller.
1747 return pC->iReturnValue;
1751 @pre Call in a thread context.
1752 @pre Interrupts must be enabled.
1753 @pre Kernel must be unlocked.
1754 @pre No fast mutex can be held
1756 /** @see NKern::Block(TUint32, TUint, NFastMutex*) */
1757 EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode)
1759 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)");
1760 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode));
1761 NThreadBase* pC=TheScheduler.iCurrentThread;
1764 if (aMode & EEnterCS)
1769 pC->iTimer.iUserFlags = TRUE;
1770 pC->iTimer.OneShot(aTimeout,TRUE);
1772 pC->iNState=NThread::EBlocked;
1773 TheScheduler.Remove(pC);
1775 return pC->iReturnValue;
1781 EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj)
1783 Places the current nanothread into a wait state on an externally
1784 defined wait object.
1786 For use by RTOS personality layers.
1787 Do not use this function directly on a Symbian OS thread.
1789 Since the kernel is locked on entry, any reschedule will be deferred until
1790 it is unlocked. The thread should be added to any necessary wait queue after
1791 a call to this function, since this function removes it from the ready list.
1792 The thread's wait timer is started if aTimeout is nonzero.
1793 The thread's NState and wait object are updated.
1795 Call NThreadBase::Release() when the wait condition is resolved.
1797 @param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks.
1798 A zero value means wait forever.
1799 If the thread is still blocked when the timeout expires,
1800 then the timeout state handler will be called.
1801 @param aState The nanokernel thread state (N-State) value to be set.
1802 This state corresponds to the externally defined wait object.
1803 This value will be written into the member NThreadBase::iNState.
1804 @param aWaitObj A pointer to an externally defined wait object.
1805 This value will be written into the member NThreadBase::iWaitObj.
1807 @pre Kernel must be locked.
1808 @pre Call in a thread context.
1810 @post Kernel is locked.
1812 @see NThreadBase::Release()
1815 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock");
1816 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj));
1817 NThreadBase* pC=TheScheduler.iCurrentThread;
1820 pC->iTimer.iUserFlags = TRUE;
1821 pC->iTimer.OneShot(aTimeout,TRUE);
1823 pC->iNState = (TUint8)aState;
1824 pC->iWaitObj = aWaitObj;
1825 pC->iReturnValue = 0;
1826 TheScheduler.Remove(pC);
1833 EXPORT_C void NKern::Sleep(TUint32 aTime)
1835 Puts the current nanothread to sleep for the specified duration.
1837 It can be called from Symbian OS threads.
1839 @param aTime sleep time in nanokernel timer ticks.
1841 @pre No fast mutex can be held.
1842 @pre Kernel must be unlocked.
1843 @pre Call in a thread context.
1844 @pre Interrupts must be enabled.
1847 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep");
1848 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Sleep %d",aTime));
1849 NThreadBase* pC=TheScheduler.iCurrentThread;
1851 pC->iTimer.iUserFlags = TRUE;
1852 pC->iTimer.OneShot(aTime,TRUE);
1853 pC->iNState=NThread::ESleep;
1854 TheScheduler.Remove(pC);
1860 /** Terminates the current nanothread.
1862 Calls to this function never return.
1864 For use by RTOS personality layers.
1865 Do not use this function directly on a Symbian OS thread.
1867 @pre Call in a thread context.
1868 @pre Interrupts must be enabled.
1869 @pre Kernel must be unlocked.
1871 EXPORT_C void NKern::Exit()
1873 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit");
1874 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Exit"));
1875 NThreadBase* pC=TheScheduler.iCurrentThread;
1877 pC->Exit(); // this won't return
1882 /** Terminates the current nanothread at the next possible point.
1884 If the calling thread is not currently in a critical section and does not
1885 currently hold a fast mutex, it exits immediately and this function does
1886 not return. On the other hand if the thread is in a critical section or
1887 holds a fast mutex the thread continues executing but it will exit as soon
1888 as it leaves the critical section and/or releases the fast mutex.
1890 @pre Call in a thread context.
1891 @pre Interrupts must be enabled.
1892 @pre Kernel must be unlocked.
1894 EXPORT_C void NKern::DeferredExit()
1896 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit");
1897 __KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit"));
1898 NFastMutex* m = HeldFastMutex();
1899 NThreadBase* pC = NKern::LockC();
1900 if (!m && !pC->iCsCount)
1901 pC->Exit(); // this won't return
1902 if (pC->iCsFunction >= 0) // don't touch it if we are already exiting
1903 pC->iCsFunction = NThreadBase::ECSExitPending;
1904 if (m && !pC->iCsCount)
1910 /** Prematurely terminates the current thread's timeslice
1912 @pre Kernel must be unlocked.
1913 @pre Call in a thread context.
1915 @post Kernel is unlocked.
1917 EXPORT_C void NKern::YieldTimeslice()
1919 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice");
1920 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice"));
1921 NThreadBase* t = NKern::LockC();
1929 /** Rotates the ready list for threads at the specified priority.
1931 For use by RTOS personality layers to allow external control of round-robin
1932 scheduling. Not intended for direct use by device drivers.
1934 @param aPriority = priority at which threads should be rotated.
1935 -1 means use calling thread's priority.
1937 @pre Kernel must be unlocked.
1938 @pre Call in a thread context.
1940 @post Kernel is unlocked.
1942 EXPORT_C void NKern::RotateReadyList(TInt aPriority)
1944 CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList");
1945 __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority));
1946 if (aPriority<0 || aPriority>=KNumPriorities)
1947 aPriority=TheScheduler.iCurrentThread->iPriority;
1949 TheScheduler.RotateReadyList(aPriority);
1953 /** Rotates the ready list for threads at the specified priority.
1955 For use by RTOS personality layers to allow external control of round-robin
1956 scheduling. Not intended for direct use by device drivers.
1958 @param aPriority = priority at which threads should be rotated.
1959 -1 means use calling thread's priority.
1960 @param aCpu = which CPU's ready list should be rotated
1961 ignored on UP systems.
1963 @pre Kernel must be unlocked.
1964 @pre Call in a thread context.
1966 @post Kernel is unlocked.
1968 EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt /*aCpu*/)
1970 RotateReadyList(aPriority);
1974 /** Returns the NThread control block for the currently scheduled thread.
1976 Note that this is the calling thread if called from a thread context, or the
1977 interrupted thread if called from an interrupt context.
1979 @return A pointer to the NThread for the currently scheduled thread.
1981 @pre Call in any context.
1983 EXPORT_C NThread* NKern::CurrentThread()
1985 return (NThread*)TheScheduler.iCurrentThread;
1989 /** Returns the CPU number of the calling CPU.
1991 @return the CPU number of the calling CPU.
1993 @pre Call in any context.
1995 EXPORT_C TInt NKern::CurrentCpu()
2001 /** Returns the number of CPUs available to Symbian OS
2003 @return the number of CPUs
2005 @pre Call in any context.
2007 EXPORT_C TInt NKern::NumberOfCpus()
2013 /** Check if the kernel is locked the specified number of times.
2015 @param aCount The number of times the kernel should be locked
2016 If zero, tests if it is locked at all
2017 @return TRUE if the tested condition is true.
2021 EXPORT_C TBool NKern::KernelLocked(TInt aCount)
2024 return TheScheduler.iKernCSLocked == aCount;
2025 return TheScheduler.iKernCSLocked!=0;
2029 /******************************************************************************
2031 ******************************************************************************/
2033 #ifndef __PRI_LIST_MACHINE_CODED__
2034 /** Returns the priority of the highest priority item present on a priority list.
2036 @return The highest priority present or -1 if the list is empty.
2038 EXPORT_C TInt TPriListBase::HighestPriority()
2040 // TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
2041 // return __e32_find_ms1_64(present);
2042 return __e32_find_ms1_64(iPresent64);
2046 /** Finds the highest priority item present on a priority list.
2048 If multiple items at the same priority are present, return the first to be
2049 added in chronological order.
2051 @return A pointer to the item or NULL if the list is empty.
2053 EXPORT_C TPriListLink* TPriListBase::First()
2055 TInt p = HighestPriority();
2056 return p >=0 ? static_cast<TPriListLink*>(iQueue[p]) : NULL;
2060 /** Adds an item to a priority list.
2062 @param aLink A pointer to the item - must not be NULL.
2064 EXPORT_C void TPriListBase::Add(TPriListLink* aLink)
2066 TInt p = aLink->iPriority;
2067 SDblQueLink* head = iQueue[p];
2070 // already some at this priority
2071 aLink->InsertBefore(head);
2075 // 'create' new list
2077 aLink->iNext = aLink->iPrev = aLink;
2078 iPresent[p>>5] |= 1u << (p & 0x1f);
2083 /** Removes an item from a priority list.
2085 @param aLink A pointer to the item - must not be NULL.
2087 EXPORT_C void TPriListBase::Remove(TPriListLink* aLink)
// NOTE(review): this listing is elided — the braces, the dequeue call and
// the last-element cleanup of iQueue[p] are missing between the numbered
// lines below; comments describe the visible logic only.
2089 if (!aLink->Alone())
2091 // not the last on this list
2092 TInt p = aLink->iPriority;
// If the element being removed is the queue head, advance the head first.
2093 if (iQueue[p] == aLink)
2094 iQueue[p] = aLink->iNext;
// Last element at this priority: clear its bit in the presence bitmap.
2099 TInt p = aLink->iPriority;
2101 iPresent[p>>5] &= ~(1u << (p & 0x1f));
2107 /** Changes the priority of an item on a priority list.
2109 @param aLink A pointer to the item to act on - must not be NULL.
2110 @param aNewPriority A new priority for the item.
2112 EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
2114 if (aLink->iPriority!=aNewPriority)
2117 aLink->iPriority=TUint8(aNewPriority);
2123 /** Adds an item to a priority list at the head of the queue for its priority.
2125 @param aLink A pointer to the item - must not be NULL.
2127 EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
2129 TInt p = aLink->iPriority;
2130 SDblQueLink* head = iQueue[p];
2134 // already some at this priority
2135 aLink->InsertBefore(head);
2139 // 'create' new list
2140 aLink->iNext = aLink->iPrev = aLink;
2141 iPresent[p>>5] |= 1u << (p & 0x1f);