sl@0: // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\nkern\nkern.cpp sl@0: // sl@0: // sl@0: sl@0: // NThreadBase member data sl@0: #define __INCLUDE_NTHREADBASE_DEFINES__ sl@0: sl@0: #include "nk_priv.h" sl@0: sl@0: /****************************************************************************** sl@0: * Fast mutex sl@0: ******************************************************************************/ sl@0: sl@0: /** Checks if the current thread holds this fast mutex sl@0: sl@0: @return TRUE if the current thread holds this fast mutex sl@0: @return FALSE if not sl@0: */ sl@0: EXPORT_C TBool NFastMutex::HeldByCurrentThread() sl@0: { sl@0: return iHoldingThread == NCurrentThread(); sl@0: } sl@0: sl@0: /** Find the fast mutex held by the current thread sl@0: sl@0: @return a pointer to the fast mutex held by the current thread sl@0: @return NULL if the current thread does not hold a fast mutex sl@0: */ sl@0: EXPORT_C NFastMutex* NKern::HeldFastMutex() sl@0: { sl@0: return TheScheduler.iCurrentThread->iHeldFastMutex; sl@0: } sl@0: sl@0: sl@0: #ifndef __FAST_MUTEX_MACHINE_CODED__ sl@0: /** Acquires the fast mutex. sl@0: sl@0: This will block until the mutex is available, and causes sl@0: the thread to enter an implicit critical section until the mutex is released. sl@0: sl@0: Generally threads would use NKern::FMWait() which manipulates the kernel lock sl@0: for you. sl@0: sl@0: @pre Kernel must be locked, with lock count 1. sl@0: @pre The calling thread holds no fast mutexes. 
sl@0: sl@0: @post Kernel is locked, with lock count 1. sl@0: @post The calling thread holds the mutex. sl@0: sl@0: @see NFastMutex::Signal() sl@0: @see NKern::FMWait() sl@0: */ sl@0: EXPORT_C void NFastMutex::Wait() sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait %M",this)); sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait"); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: if (iHoldingThread) sl@0: { sl@0: iWaiting=1; sl@0: pC->iWaitFastMutex=this; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("FMWait: YieldTo %T",iHoldingThread)); sl@0: TheScheduler.YieldTo(iHoldingThread); // returns with kernel unlocked, interrupts disabled sl@0: TheScheduler.iKernCSLocked = 1; // relock kernel sl@0: NKern::EnableAllInterrupts(); sl@0: pC->iWaitFastMutex=NULL; sl@0: } sl@0: pC->iHeldFastMutex=this; // automatically puts thread into critical section sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexWait,this); sl@0: #endif sl@0: iHoldingThread=pC; sl@0: } sl@0: sl@0: sl@0: /** Releases a previously acquired fast mutex. sl@0: sl@0: Generally, threads would use NKern::FMSignal() which manipulates the kernel lock sl@0: for you. sl@0: sl@0: @pre The calling thread holds the mutex. sl@0: @pre Kernel must be locked. sl@0: sl@0: @post Kernel is locked. 
sl@0: sl@0: @see NFastMutex::Wait() sl@0: @see NKern::FMSignal() sl@0: */ sl@0: EXPORT_C void NFastMutex::Signal() sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("FMSignal %M",this)); sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal"); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: __ASSERT_WITH_MESSAGE_DEBUG(pC->iHeldFastMutex==this,"The calling thread holds the mutex","NFastMutex::Signal"); sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,this); sl@0: #endif sl@0: iHoldingThread=NULL; sl@0: pC->iHeldFastMutex=NULL; sl@0: TBool w=iWaiting; sl@0: iWaiting=0; sl@0: if (w) sl@0: { sl@0: RescheduleNeeded(); sl@0: if (pC->iCsFunction && !pC->iCsCount) sl@0: pC->DoCsFunction(); sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Acquires a fast mutex. sl@0: sl@0: This will block until the mutex is available, and causes sl@0: the thread to enter an implicit critical section until the mutex is released. sl@0: sl@0: @param aMutex The fast mutex to acquire. sl@0: sl@0: @post The calling thread holds the mutex. sl@0: sl@0: @see NFastMutex::Wait() sl@0: @see NKern::FMSignal() sl@0: sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Kernel must be unlocked sl@0: @pre interrupts enabled sl@0: sl@0: */ sl@0: EXPORT_C void NKern::FMWait(NFastMutex* aMutex) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait"); sl@0: NKern::Lock(); sl@0: aMutex->Wait(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Releases a previously acquired fast mutex. sl@0: sl@0: @param aMutex The fast mutex to release. sl@0: sl@0: @pre The calling thread holds the mutex. sl@0: sl@0: @see NFastMutex::Signal() sl@0: @see NKern::FMWait() sl@0: */ sl@0: EXPORT_C void NKern::FMSignal(NFastMutex* aMutex) sl@0: { sl@0: NKern::Lock(); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Acquires the System Lock. 
sl@0: sl@0: This will block until the mutex is available, and causes sl@0: the thread to enter an implicit critical section until the mutex is released. sl@0: sl@0: @post System lock is held. sl@0: sl@0: @see NKern::UnlockSystem() sl@0: @see NKern::FMWait() sl@0: sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Kernel must be unlocked sl@0: @pre interrupts enabled sl@0: */ sl@0: EXPORT_C void NKern::LockSystem() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::LockSystem"); sl@0: NKern::Lock(); sl@0: TheScheduler.iLock.Wait(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Releases the System Lock. sl@0: sl@0: @pre System lock must be held. sl@0: sl@0: @see NKern::LockSystem() sl@0: @see NKern::FMSignal() sl@0: */ sl@0: EXPORT_C void NKern::UnlockSystem() sl@0: { sl@0: NKern::Lock(); sl@0: TheScheduler.iLock.Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Temporarily releases a fast mutex if there is contention. sl@0: sl@0: If there is another thread attempting to acquire the mutex, the calling sl@0: thread releases the mutex and then acquires it again. sl@0: sl@0: This is more efficient than the equivalent code: sl@0: sl@0: @code sl@0: NKern::FMSignal(); sl@0: NKern::FMWait(); sl@0: @endcode sl@0: sl@0: @return TRUE if the mutex was relinquished, FALSE if not. sl@0: sl@0: @pre The mutex must be held. sl@0: sl@0: @post The mutex is held. 
sl@0: */ sl@0: EXPORT_C TBool NKern::FMFlash(NFastMutex* aM) sl@0: { sl@0: __ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash"); sl@0: TBool w = aM->iWaiting; sl@0: if (w) sl@0: { sl@0: NKern::Lock(); sl@0: aM->Signal(); sl@0: NKern::PreemptionPoint(); sl@0: aM->Wait(); sl@0: NKern::Unlock(); sl@0: } sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: else sl@0: { sl@0: BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexFlash,aM); sl@0: } sl@0: #endif sl@0: return w; sl@0: } sl@0: sl@0: sl@0: /** Temporarily releases the System Lock if there is contention. sl@0: sl@0: If there sl@0: is another thread attempting to acquire the System lock, the calling sl@0: thread releases the mutex and then acquires it again. sl@0: sl@0: This is more efficient than the equivalent code: sl@0: sl@0: @code sl@0: NKern::UnlockSystem(); sl@0: NKern::LockSystem(); sl@0: @endcode sl@0: sl@0: Note that this can only allow higher priority threads to use the System sl@0: lock as lower priority cannot cause contention on a fast mutex. sl@0: sl@0: @return TRUE if the system lock was relinquished, FALSE if not. sl@0: sl@0: @pre System lock must be held. sl@0: sl@0: @post System lock is held. sl@0: sl@0: @see NKern::LockSystem() sl@0: @see NKern::UnlockSystem() sl@0: */ sl@0: EXPORT_C TBool NKern::FlashSystem() sl@0: { sl@0: return NKern::FMFlash(&TheScheduler.iLock); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: /****************************************************************************** sl@0: * Fast semaphore sl@0: ******************************************************************************/ sl@0: sl@0: /** Sets the owner of a fast semaphore. sl@0: sl@0: @param aThread The thread to own this semaphore. If aThread==0, then the sl@0: owner is set to the current thread. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre If changing ownership form one thread to another, the there must be no sl@0: pending signals or waits. 
sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: */ sl@0: EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner"); sl@0: if(!aThread) sl@0: aThread = TheScheduler.iCurrentThread; sl@0: if(iOwningThread && iOwningThread!=aThread) sl@0: { sl@0: __NK_ASSERT_ALWAYS(!iCount); // Can't change owner if iCount!=0 sl@0: } sl@0: iOwningThread = aThread; sl@0: } sl@0: sl@0: sl@0: #ifndef __FAST_SEM_MACHINE_CODED__ sl@0: /** Waits on a fast semaphore. sl@0: sl@0: Decrements the signal count for the semaphore and sl@0: removes the calling thread from the ready-list if the sempahore becomes sl@0: unsignalled. Only the thread that owns a fast semaphore can wait on it. sl@0: sl@0: Note that this function does not block, it merely updates the NThread state, sl@0: rescheduling will only occur when the kernel is unlocked. Generally threads sl@0: would use NKern::FSWait() which manipulates the kernel lock for you. sl@0: sl@0: @pre The calling thread must own the semaphore. sl@0: @pre No fast mutex can be held. sl@0: @pre Kernel must be locked. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @see NFastSemaphore::Signal() sl@0: @see NKern::FSWait() sl@0: @see NKern::Unlock() sl@0: */ sl@0: EXPORT_C void NFastSemaphore::Wait() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait"); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: __ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait"); sl@0: if (--iCount<0) sl@0: { sl@0: pC->iNState=NThread::EWaitFastSemaphore; sl@0: pC->iWaitObj=this; sl@0: TheScheduler.Remove(pC); sl@0: RescheduleNeeded(); sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Signals a fast semaphore. 
sl@0: sl@0: Increments the signal count of a fast semaphore by sl@0: one and releases any waiting thread if the semphore becomes signalled. sl@0: sl@0: Note that a reschedule will not occur before this function returns, this will sl@0: only take place when the kernel is unlocked. Generally threads sl@0: would use NKern::FSSignal() which manipulates the kernel lock for you. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @see NFastSemaphore::Wait() sl@0: @see NKern::FSSignal() sl@0: @see NKern::Unlock() sl@0: */ sl@0: EXPORT_C void NFastSemaphore::Signal() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal"); sl@0: if (++iCount<=0) sl@0: { sl@0: iOwningThread->iWaitObj=NULL; sl@0: iOwningThread->CheckSuspendThenReady(); sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Signals a fast semaphore multiple times. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C void NFastSemaphore::SignalN(TInt aCount) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN"); sl@0: __NK_ASSERT_DEBUG(aCount>=0); sl@0: if (aCount>0 && iCount<0) sl@0: { sl@0: iOwningThread->iWaitObj=NULL; sl@0: iOwningThread->CheckSuspendThenReady(); sl@0: } sl@0: iCount+=aCount; sl@0: } sl@0: sl@0: sl@0: /** Resets a fast semaphore. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. 
sl@0: sl@0: @internalComponent sl@0: */ sl@0: EXPORT_C void NFastSemaphore::Reset() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset"); sl@0: if (iCount<0) sl@0: { sl@0: iOwningThread->iWaitObj=NULL; sl@0: iOwningThread->CheckSuspendThenReady(); sl@0: } sl@0: iCount=0; sl@0: } sl@0: sl@0: sl@0: /** Cancels a wait on a fast semaphore. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @internalComponent sl@0: */ sl@0: void NFastSemaphore::WaitCancel() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::WaitCancel"); sl@0: iCount=0; sl@0: iOwningThread->iWaitObj=NULL; sl@0: iOwningThread->CheckSuspendThenReady(); sl@0: } sl@0: sl@0: sl@0: /** Waits for a signal on the current thread's I/O semaphore. sl@0: sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Kernel must be unlocked sl@0: @pre interrupts enabled sl@0: */ sl@0: EXPORT_C void NKern::WaitForAnyRequest() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR")); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: pC->iRequestSemaphore.Wait(); sl@0: NKern::Unlock(); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: /** Sets the owner of a fast semaphore. sl@0: sl@0: @param aSem The semaphore to change ownership off. sl@0: @param aThread The thread to own this semaphore. If aThread==0, then the sl@0: owner is set to the current thread. sl@0: sl@0: @pre If changing ownership form one thread to another, the there must be no sl@0: pending signals or waits. 
sl@0: */ sl@0: EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread) sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread)); sl@0: NKern::Lock(); sl@0: aSem->SetOwner(aThread); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: /** Waits on a fast semaphore. sl@0: sl@0: Decrements the signal count for the semaphore sl@0: and waits for a signal if the sempahore becomes unsignalled. Only the sl@0: thread that owns a fast semaphore can wait on it. sl@0: sl@0: @param aSem The semaphore to wait on. sl@0: sl@0: @pre The calling thread must own the semaphore. sl@0: @pre No fast mutex can be held. sl@0: sl@0: @see NFastSemaphore::Wait() sl@0: */ sl@0: EXPORT_C void NKern::FSWait(NFastSemaphore* aSem) sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSWait %m",aSem)); sl@0: NKern::Lock(); sl@0: aSem->Wait(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Signals a fast semaphore. sl@0: sl@0: Increments the signal count of a fast semaphore sl@0: by one and releases any waiting thread if the semphore becomes signalled. sl@0: sl@0: @param aSem The semaphore to signal. sl@0: sl@0: @see NKern::FSWait() sl@0: sl@0: @pre Interrupts must be enabled. sl@0: @pre Do not call from an ISR sl@0: */ sl@0: EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m",aSem)); sl@0: NKern::Lock(); sl@0: aSem->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Atomically signals a fast semaphore and releases a fast mutex. sl@0: sl@0: Rescheduling only occurs after both synchronisation operations are complete. sl@0: sl@0: @param aSem The semaphore to signal. sl@0: @param aMutex The mutex to release. If NULL, the System Lock is released sl@0: sl@0: @pre The calling thread must hold the mutex. 
sl@0: sl@0: @see NKern::FMSignal() sl@0: */ sl@0: EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex) sl@0: { sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignal %m +FM %M",aSem,aMutex)); sl@0: NKern::Lock(); sl@0: aSem->Signal(); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Signals a fast semaphore multiple times. sl@0: sl@0: Increments the signal count of a sl@0: fast semaphore by aCount and releases any waiting thread if the semphore sl@0: becomes signalled. sl@0: sl@0: @param aSem The semaphore to signal. sl@0: @param aCount The number of times to signal the semaphore. sl@0: sl@0: @see NKern::FSWait() sl@0: sl@0: @pre Interrupts must be enabled. sl@0: @pre Do not call from an ISR sl@0: */ sl@0: EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d",aSem,aCount)); sl@0: NKern::Lock(); sl@0: aSem->SignalN(aCount); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Atomically signals a fast semaphore multiple times and releases a fast mutex. sl@0: sl@0: Rescheduling only occurs after both synchronisation operations are complete. sl@0: sl@0: @param aSem The semaphore to signal. sl@0: @param aCount The number of times to signal the semaphore. sl@0: @param aMutex The mutex to release. If NULL, the System Lock is released. sl@0: sl@0: @pre The calling thread must hold the mutex. 
sl@0: sl@0: @see NKern::FMSignal() sl@0: */ sl@0: EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex) sl@0: { sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSignalN %m %d + FM %M",aSem,aCount,aMutex)); sl@0: NKern::Lock(); sl@0: aSem->SignalN(aCount); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /****************************************************************************** sl@0: * Thread sl@0: ******************************************************************************/ sl@0: sl@0: #ifndef __SCHEDULER_MACHINE_CODED__ sl@0: /** Makes a nanothread ready provided that it is not explicitly suspended. sl@0: sl@0: For use by RTOS personality layers. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: */ sl@0: EXPORT_C void NThreadBase::CheckSuspendThenReady() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::CheckSuspendThenReady"); sl@0: if (iSuspendCount==0) sl@0: Ready(); sl@0: else sl@0: iNState=ESuspended; sl@0: } sl@0: sl@0: /** Makes a nanothread ready. sl@0: sl@0: For use by RTOS personality layers. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: @pre The thread being made ready must not be explicitly suspended sl@0: sl@0: @post Kernel is locked. 
sl@0: */ sl@0: EXPORT_C void NThreadBase::Ready() sl@0: { sl@0: #ifdef _DEBUG sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Ready"); sl@0: __ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::Ready"); sl@0: sl@0: if (DEBUGNUM(KCRAZYSCHEDDELAY) && iPriority && TheTimerQ.iMsCount) sl@0: { sl@0: // Delay this thread, unless it's already on the delayed queue sl@0: if ((i_ThrdAttr & KThreadAttDelayed) == 0) sl@0: { sl@0: i_ThrdAttr |= KThreadAttDelayed; sl@0: TheScheduler.iDelayedQ.Add(this); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: // Delayed scheduler off sl@0: // or idle thread, or the tick hasn't started yet sl@0: DoReady(); sl@0: } sl@0: #else sl@0: DoReady(); sl@0: #endif sl@0: } sl@0: sl@0: void NThreadBase::DoReady() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::DoReady"); sl@0: __ASSERT_WITH_MESSAGE_DEBUG(iSuspendCount==0,"The thread being made ready must not be explicitly suspended","NThreadBase::DoReady"); sl@0: sl@0: TScheduler& s=TheScheduler; sl@0: TInt p=iPriority; sl@0: // __KTRACE_OPT(KSCHED,Kern::Printf("Ready(%O), priority %d status %d",this,p,iStatus)); sl@0: if (iNState==EDead) sl@0: return; sl@0: s.Add(this); sl@0: iNState=EReady; sl@0: if (!(s>p)) // s>p <=> highest ready priority > our priority so no preemption sl@0: { sl@0: // if no other thread at this priority or first thread at this priority has used its timeslice, reschedule sl@0: // note iNext points to first thread at this priority since we got added to the end sl@0: if (iNext==this || ((NThreadBase*)iNext)->iTime==0) sl@0: RescheduleNeeded(); sl@0: } sl@0: } sl@0: #endif sl@0: sl@0: void NThreadBase::DoCsFunction() sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::DoCsFunction %T %d",this,iCsFunction)); sl@0: TInt f=iCsFunction; sl@0: iCsFunction=0; sl@0: if (f>0) sl@0: { sl@0: // suspend this thread f times sl@0: Suspend(f); sl@0: return; sl@0: } sl@0: 
if (f==ECSExitPending) sl@0: { sl@0: // We need to exit now sl@0: Exit(); // this won't return sl@0: } sl@0: UnknownState(ELeaveCS,f); // call into RTOS personality sl@0: } sl@0: sl@0: sl@0: /** Suspends a nanothread the specified number of times. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: Since the kernel is locked on entry, any reschedule will be deferred until sl@0: it is unlocked. sl@0: The suspension will be deferred if the target thread is currently in a sl@0: critical section; in this case the suspension will take effect when it exits sl@0: the critical section. sl@0: The thread's unknown state handler will be invoked with function ESuspend and sl@0: parameter aCount if the current NState is not recognised and it is not in a sl@0: critical section. sl@0: sl@0: @param aCount = the number of times to suspend. sl@0: @return TRUE, if the suspension has taken immediate effect; sl@0: FALSE, if the thread is in a critical section or is already suspended. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: sl@0: @post Kernel is locked. 
sl@0: */ sl@0: EXPORT_C TBool NThreadBase::Suspend(TInt aCount) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend"); sl@0: // If thread is executing a critical section, we must defer the suspend sl@0: if (iNState==EDead) sl@0: return FALSE; // already dead so suspension is a no-op sl@0: if (iCsCount || iHeldFastMutex) sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (CSF %d) %d",this,iCsFunction,aCount)); sl@0: if (iCsFunction>=0) // -ve means thread is about to exit sl@0: { sl@0: iCsFunction+=aCount; // so thread will suspend itself when it leaves the critical section sl@0: if (iHeldFastMutex && iCsCount==0) sl@0: iHeldFastMutex->iWaiting=1; sl@0: } sl@0: return FALSE; sl@0: } sl@0: sl@0: // thread not in critical section, so suspend it sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Suspend %T (NState %d) %d",this,iNState,aCount)); sl@0: switch (iNState) sl@0: { sl@0: case EReady: sl@0: TheScheduler.Remove(this); sl@0: RescheduleNeeded(); sl@0: iNState=ESuspended; sl@0: case EWaitFastSemaphore: sl@0: case EWaitDfc: sl@0: case ESleep: sl@0: case EBlocked: sl@0: case ESuspended: sl@0: break; sl@0: default: sl@0: UnknownState(ESuspend,aCount); sl@0: break; sl@0: } sl@0: TInt old_suspend=iSuspendCount; sl@0: iSuspendCount-=aCount; sl@0: return (old_suspend==0); // return TRUE if thread has changed from not-suspended to suspended. sl@0: } sl@0: sl@0: sl@0: /** Resumes a nanothread, cancelling one suspension. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: Since the kernel is locked on entry, any reschedule will be deferred until sl@0: it is unlocked. sl@0: If the target thread is currently in a critical section this will simply sl@0: cancel one deferred suspension. sl@0: The thread's unknown state handler will be invoked with function EResume if sl@0: the current NState is not recognised and it is not in a critical section. 
sl@0: sl@0: @return TRUE, if the resumption has taken immediate effect; sl@0: FALSE, if the thread is in a critical section or is still suspended. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel must be locked. sl@0: */ sl@0: EXPORT_C TBool NThreadBase::Resume() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Resume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction)); sl@0: if (iNState==EDead) sl@0: return FALSE; sl@0: sl@0: // If thread is in critical section, just cancel deferred suspends sl@0: if (iCsCount || iHeldFastMutex) sl@0: { sl@0: if (iCsFunction>0) sl@0: --iCsFunction; // one less deferred suspension sl@0: return FALSE; sl@0: } sl@0: if (iSuspendCount<0 && ++iSuspendCount==0) sl@0: { sl@0: switch (iNState) sl@0: { sl@0: case ESuspended: sl@0: Ready(); sl@0: case EReady: sl@0: case EWaitFastSemaphore: sl@0: case EWaitDfc: sl@0: case ESleep: sl@0: case EBlocked: sl@0: break; sl@0: default: sl@0: UnknownState(EResume,0); sl@0: break; sl@0: } sl@0: return TRUE; // thread has changed from suspended to not-suspended sl@0: } sl@0: return FALSE; // still suspended or not initially suspended so no higher level action required sl@0: } sl@0: sl@0: sl@0: /** Resumes a nanothread, cancelling all outstanding suspensions. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: Since the kernel is locked on entry, any reschedule will be deferred until sl@0: it is unlocked. sl@0: If the target thread is currently in a critical section this will simply sl@0: cancel all deferred suspensions. sl@0: The thread's unknown state handler will be invoked with function EForceResume sl@0: if the current NState is not recognised and it is not in a critical section. 
sl@0: sl@0: @return TRUE, if the resumption has taken immediate effect; sl@0: FALSE, if the thread is in a critical section. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: */ sl@0: EXPORT_C TBool NThreadBase::ForceResume() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::ForceResume %T, state %d CSC %d CSF %d",this,iNState,iCsCount,iCsFunction)); sl@0: if (iNState==EDead) sl@0: return FALSE; sl@0: sl@0: // If thread is in critical section, just cancel deferred suspends sl@0: if (iCsCount || iHeldFastMutex) sl@0: { sl@0: if (iCsFunction>0) sl@0: iCsFunction=0; // cancel all deferred suspensions sl@0: return FALSE; sl@0: } sl@0: if (iSuspendCount<0) sl@0: { sl@0: iSuspendCount=0; sl@0: switch (iNState) sl@0: { sl@0: case ESuspended: sl@0: Ready(); sl@0: case EReady: sl@0: case EWaitFastSemaphore: sl@0: case EWaitDfc: sl@0: case ESleep: sl@0: case EBlocked: sl@0: case EDead: sl@0: break; sl@0: default: sl@0: UnknownState(EForceResume,0); sl@0: break; sl@0: } sl@0: } sl@0: return TRUE; sl@0: } sl@0: sl@0: sl@0: /** Releases a waiting nanokernel thread. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: This function should make the thread ready (provided it is not explicitly sl@0: suspended) and cancel any wait timeout. It should also remove it from any sl@0: wait queues. sl@0: If aReturnCode is nonnegative it indicates normal completion of the wait. sl@0: If aReturnCode is negative it indicates early/abnormal completion of the sl@0: wait and so any wait object should be reverted as if the wait had never sl@0: occurred (eg semaphore count should be incremented as this thread has not sl@0: actually acquired the semaphore). 
sl@0: The thread's unknown state handler will be invoked with function ERelease sl@0: and parameter aReturnCode if the current NState is not recognised. sl@0: sl@0: @param aReturnCode The reason code for release. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. sl@0: */ sl@0: EXPORT_C void NThreadBase::Release(TInt aReturnCode) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Release %T, state %d retcode %d",this,iNState,aReturnCode)); sl@0: switch(iNState) sl@0: { sl@0: case EDead: sl@0: return; sl@0: case EReady: sl@0: case ESuspended: sl@0: // don't release explicit suspensions sl@0: break; sl@0: case EWaitFastSemaphore: sl@0: if (aReturnCode<0 && iWaitObj) sl@0: ((NFastSemaphore*)iWaitObj)->WaitCancel(); sl@0: break; sl@0: case ESleep: sl@0: case EBlocked: sl@0: case EWaitDfc: sl@0: CheckSuspendThenReady(); sl@0: break; sl@0: default: sl@0: UnknownState(ERelease,aReturnCode); sl@0: break; sl@0: } sl@0: if (iTimer.iUserFlags) sl@0: { sl@0: if (iTimer.iState == NTimer::EIdle) sl@0: { sl@0: // Potential race condition - timer must have completed but expiry sl@0: // handler has not yet run. Signal to the handler that it should do sl@0: // nothing by flipping the bottom bit of iTimer.iPtr sl@0: // This condition cannot possibly recur until the expiry handler has sl@0: // run since all expiry handlers run in DfcThread1. sl@0: TLinAddr& x = *(TLinAddr*)&iTimer.iPtr; sl@0: x ^= 1; sl@0: } sl@0: iTimer.Cancel(); sl@0: iTimer.iUserFlags = FALSE; sl@0: } sl@0: iWaitObj=NULL; sl@0: iReturnValue=aReturnCode; sl@0: } sl@0: sl@0: sl@0: /** Signals a nanokernel thread's request semaphore. sl@0: sl@0: This can also be used on Symbian OS threads. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call either in a thread or an IDFC context. sl@0: sl@0: @post Kernel is locked. 
sl@0: */ sl@0: EXPORT_C void NThreadBase::RequestSignal() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal"); sl@0: iRequestSemaphore.Signal(); sl@0: } sl@0: sl@0: void NThreadBase::TimerExpired(TAny* aPtr) sl@0: { sl@0: TLinAddr cookie = (TLinAddr)aPtr; sl@0: NThread* pT = (NThread*)(cookie &~ 3); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::TimerExpired %T, state %d",pT,pT->iNState)); sl@0: NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler; sl@0: NKern::Lock(); sl@0: if (pT->iNStateiNState!=EBlocked) sl@0: th = NULL; sl@0: if (th) sl@0: { sl@0: // Use higher level timeout handler sl@0: NKern::Unlock(); sl@0: (*th)(pT, ETimeoutPreamble); sl@0: TInt param = ETimeoutPostamble; sl@0: NKern::Lock(); sl@0: TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr; sl@0: if ((cookie ^ current_cookie) & 1) sl@0: { sl@0: // The timer was cancelled just after expiring but before this function sl@0: // managed to call NKern::Lock(), so it's spurious sl@0: param = ETimeoutSpurious; sl@0: } sl@0: else sl@0: pT->iTimer.iUserFlags = FALSE; sl@0: NKern::Unlock(); sl@0: (*th)(pT, param); sl@0: return; sl@0: } sl@0: TLinAddr current_cookie = (TLinAddr)pT->iTimer.iPtr; sl@0: if ((cookie ^ current_cookie) & 1) sl@0: { sl@0: // The timer was cancelled just after expiring but before this function sl@0: // managed to call NKern::Lock(), so just return without doing anything. 
sl@0: NKern::Unlock(); sl@0: return; sl@0: } sl@0: pT->iTimer.iUserFlags = FALSE; sl@0: switch(pT->iNState) sl@0: { sl@0: case EDead: sl@0: case EReady: sl@0: case ESuspended: sl@0: NKern::Unlock(); sl@0: return; sl@0: case EWaitFastSemaphore: sl@0: ((NFastSemaphore*)pT->iWaitObj)->WaitCancel(); sl@0: break; sl@0: case EBlocked: sl@0: case ESleep: sl@0: case EWaitDfc: sl@0: pT->CheckSuspendThenReady(); sl@0: break; sl@0: default: sl@0: pT->UnknownState(ETimeout,0); sl@0: break; sl@0: } sl@0: pT->iWaitObj=NULL; sl@0: pT->iReturnValue=KErrTimedOut; sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Changes the priority of a nanokernel thread. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: sl@0: The thread's unknown state handler will be invoked with function EChangePriority sl@0: and parameter newp if the current NState is not recognised and the new priority sl@0: is not equal to the original priority. sl@0: sl@0: @param newp The new nanokernel priority (0 <= newp < KNumPriorities). sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: sl@0: @post Kernel is locked. 
sl@0: */ sl@0: EXPORT_C void NThreadBase::SetPriority(TInt newp) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::SetPriority %T %d->%d, state %d",this,iPriority,newp,iNState)); sl@0: #ifdef _DEBUG sl@0: // When the crazy scheduler is active, refuse to set any priority higher than 1 sl@0: if (KCrazySchedulerEnabled() && newp>1) sl@0: newp=1; sl@0: #endif sl@0: if (newp==iPriority) sl@0: return; sl@0: #ifdef BTRACE_THREAD_PRIORITY sl@0: BTrace8(BTrace::EThreadPriority,BTrace::ENThreadPriority,this,newp); sl@0: #endif sl@0: switch(iNState) sl@0: { sl@0: case EReady: sl@0: { sl@0: TInt oldp=iPriority; sl@0: TheScheduler.ChangePriority(this,newp); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: if (this==pC) sl@0: { sl@0: if (newpnewp || !TPriListLink::Alone())) // can't have scheduleroldp) sl@0: { sl@0: TInt cp=pC->iPriority; sl@0: if (newp>cp) sl@0: RescheduleNeeded(); sl@0: else if (newp==cp && pC->iTime==0) sl@0: { sl@0: if (pC->iHeldFastMutex) sl@0: pC->iHeldFastMutex->iWaiting=1; // don't round-robin now, wait until fast mutex released sl@0: else sl@0: RescheduleNeeded(); sl@0: } sl@0: } sl@0: break; sl@0: } sl@0: case ESuspended: sl@0: case EWaitFastSemaphore: sl@0: case EWaitDfc: sl@0: case ESleep: sl@0: case EBlocked: sl@0: case EDead: sl@0: iPriority=TUint8(newp); sl@0: break; sl@0: default: sl@0: UnknownState(EChangePriority,newp); sl@0: break; sl@0: } sl@0: } sl@0: sl@0: void NThreadBase::Exit() sl@0: { sl@0: // The current thread is exiting sl@0: // Enter with kernel locked, don't return sl@0: __NK_ASSERT_DEBUG(this==TheScheduler.iCurrentThread); sl@0: sl@0: OnExit(); sl@0: sl@0: TInt threadCS=iCsCount; sl@0: TInt kernCS=TheScheduler.iKernCSLocked; sl@0: iCsCount=1; sl@0: iCsFunction=ECSExitInProgress; sl@0: NKern::Unlock(); sl@0: __KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount())); sl@0: 
__KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Exit %T, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS)); sl@0: if (kernCS!=1) sl@0: FAULT(); sl@0: if (iHeldFastMutex) sl@0: FAULT(); sl@0: if (threadCS) sl@0: FAULT(); sl@0: TDfc* pD=NULL; sl@0: NThreadExitHandler xh = iHandlers->iExitHandler; sl@0: if (xh) sl@0: pD=(*xh)((NThread*)this); // call exit handler sl@0: NKern::Lock(); sl@0: if (pD) sl@0: pD->DoEnque(); sl@0: iNState=EDead; sl@0: TheScheduler.Remove(this); sl@0: RescheduleNeeded(); sl@0: #ifdef BTRACE_THREAD_IDENTIFICATION sl@0: BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this); sl@0: #endif sl@0: __NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress); sl@0: TScheduler::Reschedule(); // this won't return sl@0: FAULT(); sl@0: } sl@0: sl@0: sl@0: /** Kills a nanokernel thread. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: sl@0: When acting on the calling thread, causes the calling thread to exit. sl@0: sl@0: When acting on another thread, causes that thread to exit unless it is sl@0: currently in a critical section. In this case the thread is marked as sl@0: "exit pending" and will exit as soon as it leaves the critical section. sl@0: sl@0: In either case the exiting thread first invokes its exit handler (if it sl@0: exists). The handler runs with preemption enabled and with the thread in a sl@0: critical section so that it may not be suspended or killed again. The sl@0: handler may return a pointer to a TDfc, which will be enqueued just before sl@0: the thread finally terminates (after the kernel has been relocked). This DFC sl@0: will therefore execute once the NThread has been safely removed from the sl@0: scheduler and is intended to be used to cleanup the NThread object and any sl@0: associated personality layer resources. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. 
sl@0: @pre If acting on calling thread, calling thread must not be in a sl@0: critical section; if it is the kernel will fault. Also, the kernel sl@0: must be locked exactly once (iKernCSLocked = 1). sl@0: sl@0: @post Kernel is locked, if not acting on calling thread. sl@0: @post Does not return if it acts on the calling thread. sl@0: */ sl@0: EXPORT_C void NThreadBase::Kill() sl@0: { sl@0: // Kill a thread sl@0: // Enter with kernel locked sl@0: // Exit with kernel locked if not current thread, otherwise does not return sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T, state %d CSC %d HeldFM %M",this,iNState,iCsCount,iHeldFastMutex)); sl@0: OnKill(); // platform-specific hook sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: if (this==pC) sl@0: { sl@0: if (iCsFunction==ECSExitInProgress) sl@0: FAULT(); sl@0: Exit(); // this will not return sl@0: } sl@0: if (iCsCount || iHeldFastMutex) sl@0: { sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill %T deferred",this)); sl@0: if (iCsFunction<0) sl@0: return; // thread is already exiting sl@0: iCsFunction=ECSExitPending; // zap any suspensions pending sl@0: if (iHeldFastMutex && iCsCount==0) sl@0: iHeldFastMutex->iWaiting=1; sl@0: return; sl@0: } sl@0: sl@0: // thread is not in critical section sl@0: // make the thread divert to Exit() when it next runs sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NThreadBase::Kill diverting %T",this)); sl@0: Release(KErrDied); // cancel any waits on semaphores etc. sl@0: ForceResume(); // release any suspensions sl@0: iWaitFastMutex=NULL; // if thread was waiting for a fast mutex it needn't bother sl@0: iCsCount=1; // stop anyone suspending the thread sl@0: iCsFunction=ECSExitPending; sl@0: ForceExit(); // get thread to call Exit when it is next scheduled sl@0: } sl@0: sl@0: sl@0: /** Suspends the execution of a thread. 
sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend(). sl@0: sl@0: If the thread is in a critical section or holds a fast mutex, the suspension will sl@0: be deferred until the thread leaves the critical section or signals the fast mutex. sl@0: Otherwise the thread will be suspended with immediate effect. If the thread it's sl@0: running, the execution of the thread will be suspended and a reschedule will occur. sl@0: sl@0: @param aThread Thread to be suspended. sl@0: @param aCount Number of times to suspend this thread. sl@0: sl@0: @return TRUE, if the thread had changed the state from non-suspended to suspended; sl@0: FALSE, otherwise. sl@0: sl@0: @see Kern::ThreadSuspend() sl@0: */ sl@0: EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount) sl@0: { sl@0: NKern::Lock(); sl@0: TBool r=aThread->Suspend(aCount); sl@0: NKern::Unlock(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** Resumes the execution of a thread. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume(). sl@0: sl@0: This function resumes the thread once. If the thread was suspended more than once sl@0: the thread will remain suspended. sl@0: If the thread is in a critical section, this function will decrease the number of sl@0: deferred suspensions. sl@0: sl@0: @param aThread Thread to be resumed. sl@0: sl@0: @return TRUE, if the thread had changed the state from suspended to non-suspended; sl@0: FALSE, otherwise. sl@0: sl@0: @see Kern::ThreadResume() sl@0: */ sl@0: EXPORT_C TBool NKern::ThreadResume(NThread* aThread) sl@0: { sl@0: NKern::Lock(); sl@0: TBool r=aThread->Resume(); sl@0: NKern::Unlock(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** Resumes the execution of a thread and signals a mutex. 
sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume(). sl@0: sl@0: This function resumes the thread once. If the thread was suspended more than once sl@0: the thread will remain suspended. sl@0: If the thread is in a critical section, this function will decrease the number of sl@0: deferred suspensions. sl@0: sl@0: @param aThread Thread to be resumed. sl@0: @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled. sl@0: sl@0: @return TRUE, if the thread had changed the state from suspended to non-suspended; sl@0: FALSE, otherwise. sl@0: sl@0: @see Kern::ThreadResume() sl@0: */ sl@0: EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex) sl@0: { sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadResume %T + FM %M",aThread,aMutex)); sl@0: NKern::Lock(); sl@0: TBool r=aThread->Resume(); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** Forces the execution of a thread to be resumed. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume(). sl@0: sl@0: This function cancels all suspensions on a thread. sl@0: sl@0: @param aThread Thread to be resumed. sl@0: sl@0: @return TRUE, if the thread had changed the state from suspended to non-suspended; sl@0: FALSE, otherwise. sl@0: sl@0: @see Kern::ThreadResume() sl@0: */ sl@0: EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread) sl@0: { sl@0: NKern::Lock(); sl@0: TBool r=aThread->ForceResume(); sl@0: NKern::Unlock(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** Forces the execution of a thread to be resumed and signals a mutex. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. 
sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume(). sl@0: sl@0: This function cancels all suspensions on a thread. sl@0: sl@0: @param aThread Thread to be resumed. sl@0: @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled. sl@0: sl@0: @return TRUE, if the thread had changed the state from suspended to non-suspended; sl@0: FALSE, otherwise. sl@0: sl@0: @see Kern::ThreadResume() sl@0: */ sl@0: EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex) sl@0: { sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadForceResume %T + FM %M",aThread,aMutex)); sl@0: NKern::Lock(); sl@0: TBool r=aThread->ForceResume(); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** Awakens a nanothread. sl@0: sl@0: This function is used to implement synchronisation primitives in the EPOC sl@0: kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not sl@0: intended to be used directly by device drivers. sl@0: sl@0: If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is sl@0: blocked in a call to NKern::Block, it is awakened and put back on the ready sl@0: list. Otherwise, the thread state is unchanged. In particular, nothing sl@0: happens if the nanothread has been explicitly suspended. sl@0: sl@0: @param aThread Thread to release. sl@0: @param aReturnValue Value returned by NKern::Block if the thread was blocked. sl@0: sl@0: @see NKern::Block() sl@0: sl@0: @pre Interrupts must be enabled. 
sl@0: @pre Do not call from an ISR sl@0: */ sl@0: EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)"); sl@0: NKern::Lock(); sl@0: aThread->Release(aReturnValue); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Atomically awakens a nanothread and signals a fast mutex. sl@0: sl@0: This function is used to implement synchronisation primitives in the EPOC sl@0: kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not sl@0: intended to be used directly by device drivers. sl@0: sl@0: @param aThread Thread to release. sl@0: @param aReturnValue Value returned by NKern::Block if the thread was blocked. sl@0: @param aMutex Fast mutex to signal. If NULL, the system lock is signalled. sl@0: sl@0: @see NKern::ThreadRelease(NThread*, TInt) sl@0: @see NKern::Block() sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre Specified mutex must be held sl@0: */ sl@0: EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)"); sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadRelease %T ret %d + FM %M",aThread,aReturnValue,aMutex)); sl@0: NKern::Lock(); sl@0: aThread->Release(aReturnValue); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Changes the priority of a thread. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority(). sl@0: sl@0: @param aThread Thread to receive the new priority. sl@0: @param aPriority New priority for aThread. 
sl@0: sl@0: @see Kern::SetThreadPriority() sl@0: */ sl@0: EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority) sl@0: { sl@0: NKern::Lock(); sl@0: aThread->SetPriority(aPriority); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Changes the priority of a thread and signals a mutex. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority(). sl@0: sl@0: @param aThread Thread to receive the new priority. sl@0: @param aPriority New priority for aThread. sl@0: @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled. sl@0: sl@0: @see Kern::SetThreadPriority() sl@0: */ sl@0: EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex) sl@0: { sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadSetPriority %T->%d + FM %M",aThread,aPriority,aMutex)); sl@0: NKern::Lock(); sl@0: aThread->SetPriority(aPriority); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: #ifndef __SCHEDULER_MACHINE_CODED__ sl@0: sl@0: /** Signals the request semaphore of a nanothread. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality sl@0: layers. Device drivers should use Kern::RequestComplete instead. sl@0: sl@0: @param aThread Nanothread to signal. Must be non NULL. sl@0: sl@0: @see Kern::RequestComplete() sl@0: sl@0: @pre Interrupts must be enabled. sl@0: @pre Do not call from an ISR sl@0: */ sl@0: EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRequestSignal(NThread*)"); sl@0: NKern::Lock(); sl@0: aThread->iRequestSemaphore.Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Atomically signals the request semaphore of a nanothread and a fast mutex. 
sl@0: sl@0: This function is intended to be used by the EPOC layer and personality sl@0: layers. Device drivers should use Kern::RequestComplete instead. sl@0: sl@0: @param aThread Nanothread to signal. Must be non NULL. sl@0: @param aMutex Fast mutex to signal. If NULL, the system lock is signaled. sl@0: sl@0: @see Kern::RequestComplete() sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre Specified mutex must be held sl@0: */ sl@0: EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)"); sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: NKern::Lock(); sl@0: aThread->iRequestSemaphore.Signal(); sl@0: aMutex->Signal(); sl@0: NKern::Unlock(); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: /** Signals the request semaphore of a nanothread several times. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality sl@0: layers. Device drivers should use Kern::RequestComplete instead. sl@0: sl@0: @param aThread Nanothread to signal. If NULL, the current thread is signaled. sl@0: @param aCount Number of times the request semaphore must be signaled. sl@0: sl@0: @pre aCount >= 0 sl@0: sl@0: @see Kern::RequestComplete() sl@0: */ sl@0: EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount) sl@0: { sl@0: __ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal"); sl@0: if (!aThread) sl@0: aThread=(NThread*)TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: aThread->iRequestSemaphore.SignalN(aCount); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Kills a nanothread. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill(). 
sl@0: sl@0: This function does not return if the current thread is killed. sl@0: This function is asynchronous (i.e. the thread to kill may still be alive when the call returns). sl@0: sl@0: @param aThread Thread to kill. Must be non NULL. sl@0: sl@0: @pre If acting on calling thread, calling thread must not be in a sl@0: critical section sl@0: @pre Thread must not already be exiting. sl@0: sl@0: @see Kern::ThreadKill() sl@0: */ sl@0: EXPORT_C void NKern::ThreadKill(NThread* aThread) sl@0: { sl@0: NKern::Lock(); sl@0: aThread->Kill(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Atomically kills a nanothread and signals a fast mutex. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality layers. sl@0: Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill(). sl@0: sl@0: @param aThread Thread to kill. Must be non NULL. sl@0: @param aMutex Fast mutex to signal. If NULL, the system lock is signalled. sl@0: sl@0: @pre If acting on calling thread, calling thread must not be in a sl@0: critical section sl@0: @pre Thread must not already be exiting. sl@0: sl@0: @see NKern::ThreadKill(NThread*) sl@0: */ sl@0: EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex) sl@0: { sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: if (aThread==pC) sl@0: { sl@0: __NK_ASSERT_DEBUG(pC->iCsCount==0); // Make sure thread isn't in critical section sl@0: aThread->iCsFunction=NThreadBase::ECSExitPending; sl@0: aMutex->iWaiting=1; sl@0: aMutex->Signal(); // this will make us exit sl@0: FAULT(); // should never get here sl@0: } sl@0: else sl@0: { sl@0: aThread->Kill(); sl@0: aMutex->Signal(); sl@0: } sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Enters thread critical section. sl@0: sl@0: This function can safely be used in device drivers. sl@0: sl@0: The current thread will enter its critical section. 
While in critical section sl@0: the thread cannot be suspended or killed. Any suspension or kill will be deferred sl@0: until the thread leaves the critical section. sl@0: Some API explicitly require threads to be in critical section before calling that sl@0: API. sl@0: Only User threads need to call this function as the concept of thread critical sl@0: section applies to User threads only. sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Kernel must be unlocked. sl@0: */ sl@0: EXPORT_C void NKern::ThreadEnterCS() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS"); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadEnterCS %T",pC)); sl@0: __NK_ASSERT_DEBUG(pC->iCsCount>=0); sl@0: ++pC->iCsCount; sl@0: } sl@0: sl@0: sl@0: NThread* NKern::_ThreadEnterCS() sl@0: { sl@0: NThread* pC = (NThread*)TheScheduler.iCurrentThread; sl@0: __NK_ASSERT_DEBUG(pC->iCsCount>=0); sl@0: ++pC->iCsCount; sl@0: return pC; sl@0: } sl@0: sl@0: sl@0: /** Leaves thread critical section. sl@0: sl@0: This function can safely be used in device drivers. sl@0: sl@0: The current thread will leave its critical section. If the thread was suspended/killed sl@0: while in critical section, the thread will be suspended/killed after leaving the sl@0: critical section by calling this function. sl@0: Only User threads need to call this function as the concept of thread critical sl@0: section applies to User threads only. sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Kernel must be unlocked. 
sl@0: */ sl@0: sl@0: EXPORT_C void NKern::ThreadLeaveCS() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS"); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::ThreadLeaveCS %T",pC)); sl@0: __NK_ASSERT_DEBUG(pC->iCsCount>0); sl@0: if (--pC->iCsCount==0 && pC->iCsFunction!=0) sl@0: { sl@0: if (pC->iHeldFastMutex) sl@0: pC->iHeldFastMutex->iWaiting=1; sl@0: else sl@0: pC->DoCsFunction(); sl@0: } sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: void NKern::_ThreadLeaveCS() sl@0: { sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: __NK_ASSERT_DEBUG(pC->iCsCount>0); sl@0: if (--pC->iCsCount==0 && pC->iCsFunction!=0) sl@0: { sl@0: if (pC->iHeldFastMutex) sl@0: pC->iHeldFastMutex->iWaiting=1; sl@0: else sl@0: pC->DoCsFunction(); sl@0: } sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: /** Freeze the CPU of the current thread sl@0: sl@0: After this the current thread will not migrate to another processor sl@0: sl@0: On uniprocessor builds does nothing and returns 0 sl@0: sl@0: @return A cookie to be passed to NKern::EndFreezeCpu() to allow nesting sl@0: */ sl@0: EXPORT_C TInt NKern::FreezeCpu() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu"); sl@0: return 0; sl@0: } sl@0: sl@0: sl@0: /** Unfreeze the current thread's CPU sl@0: sl@0: After this the current thread will again be eligible to migrate to another processor sl@0: sl@0: On uniprocessor builds does nothing sl@0: sl@0: @param aCookie the value returned by NKern::FreezeCpu() sl@0: */ sl@0: EXPORT_C void NKern::EndFreezeCpu(TInt /*aCookie*/) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu"); sl@0: } sl@0: sl@0: sl@0: /** Change the CPU affinity of a thread sl@0: sl@0: On uniprocessor builds does nothing sl@0: sl@0: @pre Call in a thread context. 
sl@0: sl@0: @param The new CPU affinity mask sl@0: @return The old affinity mask sl@0: */ sl@0: EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread*, TUint32) sl@0: { sl@0: return 0; // lock to processor 0 sl@0: } sl@0: sl@0: sl@0: /** Modify a thread's timeslice sl@0: sl@0: @pre Call in a thread context. sl@0: sl@0: @param aTimeslice The new timeslice value sl@0: */ sl@0: EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice) sl@0: { sl@0: NKern::Lock(); sl@0: if (aThread->iTimeslice == aThread->iTime || aTimeslice<0) sl@0: aThread->iTime = aTimeslice; sl@0: aThread->iTimeslice = aTimeslice; sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Blocks current nanothread. sl@0: sl@0: This function is used to implement synchronisation primitives in the EPOC sl@0: layer and in personality layers. It is not intended to be used directly by sl@0: device drivers. sl@0: sl@0: @param aTimeout If greater than 0, the nanothread will be blocked for at most sl@0: aTimeout microseconds. sl@0: @param aMode Bitmask whose possible values are documented in TBlockMode. sl@0: @param aMutex Fast mutex to operate on. If NULL, the system lock is used. sl@0: sl@0: @see NKern::ThreadRelease() sl@0: @see TBlockMode sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. 
sl@0: @pre Specified mutex must be held sl@0: */ sl@0: EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)"); sl@0: if (!aMutex) sl@0: aMutex=&TheScheduler.iLock; sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex)); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: pC->iReturnValue=0; sl@0: NKern::Lock(); sl@0: if (aMode & EEnterCS) sl@0: ++pC->iCsCount; sl@0: if (aMode & ERelease) sl@0: { sl@0: #ifdef BTRACE_FAST_MUTEX sl@0: BTraceContext4(BTrace::EFastMutex,BTrace::EFastMutexSignal,aMutex); sl@0: #endif sl@0: aMutex->iHoldingThread=NULL; sl@0: TBool w=aMutex->iWaiting; sl@0: aMutex->iWaiting=0; sl@0: pC->iHeldFastMutex=NULL; sl@0: if (w && !pC->iCsCount && pC->iCsFunction) sl@0: pC->DoCsFunction(); sl@0: } sl@0: RescheduleNeeded(); sl@0: if (aTimeout) sl@0: { sl@0: pC->iTimer.iUserFlags = TRUE; sl@0: pC->iTimer.OneShot(aTimeout,TRUE); sl@0: } sl@0: if (pC->iNState==NThread::EReady) sl@0: TheScheduler.Remove(pC); sl@0: pC->iNState=NThread::EBlocked; sl@0: NKern::Unlock(); sl@0: if (aMode & EClaim) sl@0: FMWait(aMutex); sl@0: return pC->iReturnValue; sl@0: } sl@0: sl@0: /** sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. 
sl@0: @pre No fast mutex can be held sl@0: */ sl@0: /** @see NKern::Block(TUint32, TUint, NFastMutex*) */ sl@0: EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode)); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: pC->iReturnValue=0; sl@0: NKern::Lock(); sl@0: if (aMode & EEnterCS) sl@0: ++pC->iCsCount; sl@0: RescheduleNeeded(); sl@0: if (aTimeout) sl@0: { sl@0: pC->iTimer.iUserFlags = TRUE; sl@0: pC->iTimer.OneShot(aTimeout,TRUE); sl@0: } sl@0: pC->iNState=NThread::EBlocked; sl@0: TheScheduler.Remove(pC); sl@0: NKern::Unlock(); sl@0: return pC->iReturnValue; sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj) sl@0: /** sl@0: Places the current nanothread into a wait state on an externally sl@0: defined wait object. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. sl@0: sl@0: Since the kernel is locked on entry, any reschedule will be deferred until sl@0: it is unlocked. The thread should be added to any necessary wait queue after sl@0: a call to this function, since this function removes it from the ready list. sl@0: The thread's wait timer is started if aTimeout is nonzero. sl@0: The thread's NState and wait object are updated. sl@0: sl@0: Call NThreadBase::Release() when the wait condition is resolved. sl@0: sl@0: @param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks. sl@0: A zero value means wait forever. sl@0: If the thread is still blocked when the timeout expires, sl@0: then the timeout state handler will be called. sl@0: @param aState The nanokernel thread state (N-State) value to be set. sl@0: This state corresponds to the externally defined wait object. 
sl@0: This value will be written into the member NThreadBase::iNState. sl@0: @param aWaitObj A pointer to an externally defined wait object. sl@0: This value will be written into the member NThreadBase::iWaitObj. sl@0: sl@0: @pre Kernel must be locked. sl@0: @pre Call in a thread context. sl@0: sl@0: @post Kernel is locked. sl@0: sl@0: @see NThreadBase::Release() sl@0: */ sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj)); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: if (aTimeout) sl@0: { sl@0: pC->iTimer.iUserFlags = TRUE; sl@0: pC->iTimer.OneShot(aTimeout,TRUE); sl@0: } sl@0: pC->iNState = (TUint8)aState; sl@0: pC->iWaitObj = aWaitObj; sl@0: pC->iReturnValue = 0; sl@0: TheScheduler.Remove(pC); sl@0: RescheduleNeeded(); sl@0: } sl@0: sl@0: sl@0: sl@0: sl@0: EXPORT_C void NKern::Sleep(TUint32 aTime) sl@0: /** sl@0: Puts the current nanothread to sleep for the specified duration. sl@0: sl@0: It can be called from Symbian OS threads. sl@0: sl@0: @param aTime sleep time in nanokernel timer ticks. sl@0: sl@0: @pre No fast mutex can be held. sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Sleep %d",aTime)); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: pC->iTimer.iUserFlags = TRUE; sl@0: pC->iTimer.OneShot(aTime,TRUE); sl@0: pC->iNState=NThread::ESleep; sl@0: TheScheduler.Remove(pC); sl@0: RescheduleNeeded(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Terminates the current nanothread. sl@0: sl@0: Calls to this function never return. sl@0: sl@0: For use by RTOS personality layers. sl@0: Do not use this function directly on a Symbian OS thread. 
sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: */ sl@0: EXPORT_C void NKern::Exit() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Exit")); sl@0: NThreadBase* pC=TheScheduler.iCurrentThread; sl@0: NKern::Lock(); sl@0: pC->Exit(); // this won't return sl@0: FAULT(); sl@0: } sl@0: sl@0: sl@0: /** Terminates the current nanothread at the next possible point. sl@0: sl@0: If the calling thread is not currently in a critical section and does not sl@0: currently hold a fast mutex, it exits immediately and this function does sl@0: not return. On the other hand if the thread is in a critical section or sl@0: holds a fast mutex the thread continues executing but it will exit as soon sl@0: as it leaves the critical section and/or releases the fast mutex. sl@0: sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: */ sl@0: EXPORT_C void NKern::DeferredExit() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit")); sl@0: NFastMutex* m = HeldFastMutex(); sl@0: NThreadBase* pC = NKern::LockC(); sl@0: if (!m && !pC->iCsCount) sl@0: pC->Exit(); // this won't return sl@0: if (pC->iCsFunction >= 0) // don't touch it if we are already exiting sl@0: pC->iCsFunction = NThreadBase::ECSExitPending; sl@0: if (m && !pC->iCsCount) sl@0: m->iWaiting = TRUE; sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Prematurely terminates the current thread's timeslice sl@0: sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: sl@0: @post Kernel is unlocked. 
sl@0: */ sl@0: EXPORT_C void NKern::YieldTimeslice() sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice")); sl@0: NThreadBase* t = NKern::LockC(); sl@0: t->iTime = 0; sl@0: if (t->iNext != t) sl@0: RescheduleNeeded(); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: sl@0: /** Rotates the ready list for threads at the specified priority. sl@0: sl@0: For use by RTOS personality layers to allow external control of round-robin sl@0: scheduling. Not intended for direct use by device drivers. sl@0: sl@0: @param aPriority = priority at which threads should be rotated. sl@0: -1 means use calling thread's priority. sl@0: sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: sl@0: @post Kernel is unlocked. sl@0: */ sl@0: EXPORT_C void NKern::RotateReadyList(TInt aPriority) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList"); sl@0: __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority)); sl@0: if (aPriority<0 || aPriority>=KNumPriorities) sl@0: aPriority=TheScheduler.iCurrentThread->iPriority; sl@0: NKern::Lock(); sl@0: TheScheduler.RotateReadyList(aPriority); sl@0: NKern::Unlock(); sl@0: } sl@0: sl@0: /** Rotates the ready list for threads at the specified priority. sl@0: sl@0: For use by RTOS personality layers to allow external control of round-robin sl@0: scheduling. Not intended for direct use by device drivers. sl@0: sl@0: @param aPriority = priority at which threads should be rotated. sl@0: -1 means use calling thread's priority. sl@0: @param aCpu = which CPU's ready list should be rotated sl@0: ignored on UP systems. sl@0: sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: sl@0: @post Kernel is unlocked. 
sl@0: */
sl@0: EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt /*aCpu*/)
sl@0: 	{
sl@0: 	// Uniprocessor build: there is only one ready list, so aCpu is ignored
sl@0: 	// and the call is delegated to the single-list overload.
sl@0: 	RotateReadyList(aPriority);
sl@0: 	}
sl@0: 
sl@0: 
sl@0: /** Returns the NThread control block for the currently scheduled thread.
sl@0: 
sl@0: Note that this is the calling thread if called from a thread context, or the
sl@0: interrupted thread if called from an interrupt context.
sl@0: 
sl@0: @return	A pointer to the NThread for the currently scheduled thread.
sl@0: 
sl@0: @pre	Call in any context.
sl@0: */
sl@0: EXPORT_C NThread* NKern::CurrentThread()
sl@0: 	{
sl@0: 	// The scheduler stores an NThreadBase*; downcast to the full NThread.
sl@0: 	return (NThread*)TheScheduler.iCurrentThread;
sl@0: 	}
sl@0: 
sl@0: 
sl@0: /** Returns the CPU number of the calling CPU.
sl@0: 
sl@0: @return the CPU number of the calling CPU.
sl@0: 
sl@0: @pre	Call in any context.
sl@0: */
sl@0: EXPORT_C TInt NKern::CurrentCpu()
sl@0: 	{
sl@0: 	return 0;	// uniprocessor build: the only CPU is CPU 0
sl@0: 	}
sl@0: 
sl@0: 
sl@0: /** Returns the number of CPUs available to Symbian OS
sl@0: 
sl@0: @return the number of CPUs
sl@0: 
sl@0: @pre	Call in any context.
sl@0: */
sl@0: EXPORT_C TInt NKern::NumberOfCpus()
sl@0: 	{
sl@0: 	return 1;	// uniprocessor build
sl@0: 	}
sl@0: 
sl@0: 
sl@0: /** Check if the kernel is locked the specified number of times.
sl@0: 
sl@0: @param aCount	The number of times the kernel should be locked
sl@0: If zero, tests if it is locked at all
sl@0: @return TRUE if the tested condition is true.
sl@0: 
sl@0: @internalTechnology
sl@0: */
sl@0: EXPORT_C TBool NKern::KernelLocked(TInt aCount)
sl@0: 	{
sl@0: 	if (aCount)
sl@0: 		return TheScheduler.iKernCSLocked == aCount;	// exact lock count requested
sl@0: 	return TheScheduler.iKernCSLocked!=0;				// aCount==0: locked at all?
sl@0: 	}
sl@0: 
sl@0: 
sl@0: /******************************************************************************
sl@0: * Priority lists
sl@0: ******************************************************************************/
sl@0: 
sl@0: #ifndef __PRI_LIST_MACHINE_CODED__
sl@0: /** Returns the priority of the highest priority item present on a priority list.
sl@0: 
sl@0: @return	The highest priority present or -1 if the list is empty.
sl@0: */ sl@0: EXPORT_C TInt TPriListBase::HighestPriority() sl@0: { sl@0: // TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]); sl@0: // return __e32_find_ms1_64(present); sl@0: return __e32_find_ms1_64(iPresent64); sl@0: } sl@0: sl@0: sl@0: /** Finds the highest priority item present on a priority list. sl@0: sl@0: If multiple items at the same priority are present, return the first to be sl@0: added in chronological order. sl@0: sl@0: @return A pointer to the item or NULL if the list is empty. sl@0: */ sl@0: EXPORT_C TPriListLink* TPriListBase::First() sl@0: { sl@0: TInt p = HighestPriority(); sl@0: return p >=0 ? static_cast(iQueue[p]) : NULL; sl@0: } sl@0: sl@0: sl@0: /** Adds an item to a priority list. sl@0: sl@0: @param aLink A pointer to the item - must not be NULL. sl@0: */ sl@0: EXPORT_C void TPriListBase::Add(TPriListLink* aLink) sl@0: { sl@0: TInt p = aLink->iPriority; sl@0: SDblQueLink* head = iQueue[p]; sl@0: if (head) sl@0: { sl@0: // already some at this priority sl@0: aLink->InsertBefore(head); sl@0: } sl@0: else sl@0: { sl@0: // 'create' new list sl@0: iQueue[p] = aLink; sl@0: aLink->iNext = aLink->iPrev = aLink; sl@0: iPresent[p>>5] |= 1u << (p & 0x1f); sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Removes an item from a priority list. sl@0: sl@0: @param aLink A pointer to the item - must not be NULL. sl@0: */ sl@0: EXPORT_C void TPriListBase::Remove(TPriListLink* aLink) sl@0: { sl@0: if (!aLink->Alone()) sl@0: { sl@0: // not the last on this list sl@0: TInt p = aLink->iPriority; sl@0: if (iQueue[p] == aLink) sl@0: iQueue[p] = aLink->iNext; sl@0: aLink->Deque(); sl@0: } sl@0: else sl@0: { sl@0: TInt p = aLink->iPriority; sl@0: iQueue[p] = 0; sl@0: iPresent[p>>5] &= ~(1u << (p & 0x1f)); sl@0: KILL_LINK(aLink); sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Changes the priority of an item on a priority list. sl@0: sl@0: @param aLink A pointer to the item to act on - must not be NULL. sl@0: @param aNewPriority A new priority for the item. 
sl@0: */
sl@0: EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
sl@0: 	{
sl@0: 	if (aLink->iPriority!=aNewPriority)
sl@0: 		{
sl@0: 		// Re-queue under the new priority; Remove/Add keep the presence mask consistent.
sl@0: 		Remove(aLink);
sl@0: 		aLink->iPriority=TUint8(aNewPriority);
sl@0: 		Add(aLink);
sl@0: 		}
sl@0: 	}
sl@0: #endif
sl@0: 
sl@0: /** Adds an item to a priority list at the head of the queue for its priority.
sl@0: 
sl@0: @param aLink A pointer to the item - must not be NULL.
sl@0: */
sl@0: EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
sl@0: 	{
sl@0: 	TInt p = aLink->iPriority;
sl@0: 	SDblQueLink* head = iQueue[p];
sl@0: 	iQueue[p] = aLink;	// new item becomes the head of its priority queue
sl@0: 	if (head)
sl@0: 		{
sl@0: 		// already some at this priority - link in front of the old head
sl@0: 		aLink->InsertBefore(head);
sl@0: 		}
sl@0: 	else
sl@0: 		{
sl@0: 		// 'create' new list - single-element circular queue, set presence bit
sl@0: 		aLink->iNext = aLink->iPrev = aLink;
sl@0: 		iPresent[p>>5] |= 1u << (p & 0x1f);
sl@0: 		}
sl@0: 	}
sl@0: 
sl@0: 