1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/nkernsmp/nkern.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,2708 @@
1.4 +// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\nkernsmp\nkern.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +// NThreadBase member data
1.22 +#define __INCLUDE_NTHREADBASE_DEFINES__
1.23 +
1.24 +#include "nk_priv.h"
1.25 +
1.26 +/******************************************************************************
1.27 + * Fast mutex
1.28 + ******************************************************************************/
1.29 +
1.30 +/** Acquires the fast mutex.
1.31 +
1.32 + This will block until the mutex is available, and causes
1.33 + the thread to enter an implicit critical section until the mutex is released.
1.34 +
1.35 + Generally threads would use NKern::FMWait() which manipulates the kernel lock
1.36 + for you.
1.37 +
1.38 + @pre Kernel must be locked, with lock count 1.
1.39 + @pre The calling thread holds no fast mutexes.
1.40 +
1.41 + @post Kernel is locked, with lock count 1.
1.42 + @post The calling thread holds the mutex.
1.43 +
1.44 + @see NFastMutex::Signal()
1.45 + @see NKern::FMWait()
1.46 +*/
1.47 +EXPORT_C void NFastMutex::Wait()
1.48 + {
1.49 + NThreadBase* pC = NCurrentThreadL();
1.50 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NO_FAST_MUTEX,"NFastMutex::Wait");
1.51 +
1.52 + pC->iHeldFastMutex = this; // set now so kill/suspend between here and actually acquiring the mutex is handled
1.53 + DoWaitL();
1.54 + }
1.55 +
1.56 +void NFastMutex::DoWaitL()
1.57 + {
1.58 + NThreadBase* pC = NCurrentThreadL();
1.59 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T FMWait %M",pC,this));
1.60 + TBool waited = FALSE; // set if we actually had to wait
1.61 + iMutexLock.LockOnly(); // acquire mutex spin lock
1.62 + __e32_atomic_ior_rlx_ptr(&iHoldingThread, 1); // set contention flag to make sure any other thread must acquire the mutex spin lock
1.63 + pC->AcqSLock();
1.64 + FOREVER
1.65 + {
1.66 + if (pC->iFastMutexDefer == 1)
1.67 + --pC->iParent->iFreezeCpu;
1.68 + pC->iFastMutexDefer = 0;
1.69 + NThreadBase* pH = (NThreadBase*)(TLinAddr(iHoldingThread) &~ 1);
1.70 + if (!pH)
1.71 + {
1.72 + // mutex is free
1.73 + TInt wp = iWaitQ.HighestPriority(); // -1 if no other thread wants the mutex
1.74 +
1.75 + // don't grab mutex if we have been suspended/killed/migrated by the previous holding thread
1.76 + if (!pC->iSuspended && pC->iCsFunction!=NThreadBase::ECSDivertPending && (!pC->iParent->iCpuChange || pC->iParent->iFreezeCpu))
1.77 + {
1.78 + TInt p = pC->iPriority;
1.79 + if (p>wp || (p==wp && waited))
1.80 + {
1.81 + // if we are highest priority waiting thread or equal and we have waited then grab the mutex
1.82 + // don't just grab it if we are equal priority and someone else was already waiting
1.83 + // set contention flag if other threads waiting or if current thread has a round robin outstanding
1.84 + pC->iMutexPri = (TUint8)(wp>=0 ? wp : 0); // pC's actual priority doesn't change since p>=wp
1.85 + iHoldingThread = (wp>=0 || TUint32(pC->iTime)==0x80000000u) ? (NThreadBase*)(TLinAddr(pC)|1) : pC;
1.86 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T got mutex %M CF=%d WP=%d",TLinAddr(iHoldingThread)&~1,this,TLinAddr(iHoldingThread)&1,wp));
1.87 + pC->RelSLock();
1.88 + iMutexLock.UnlockOnly();
1.89 +#ifdef BTRACE_FAST_MUTEX
1.90 + BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, this);
1.91 +#endif
1.92 + return;
1.93 + }
1.94 + }
1.95 + }
1.96 + pC->iFastMutexDefer = 2; // signal to scheduler to allow ctxsw without incrementing iParent->iFreezeCpu
1.97 + if (!pC->iSuspended && pC->iCsFunction!=NThreadBase::ECSDivertPending && (!pC->iParent->iCpuChange || pC->iParent->iFreezeCpu))
1.98 + {
1.99 + // this forces priority changes to wait for the mutex lock
1.100 + pC->iLinkedObjType = NThreadBase::EWaitFastMutex;
1.101 + pC->iLinkedObj = this;
1.102 + pC->iWaitState.SetUpWait(NThreadBase::EWaitFastMutex, NThreadWaitState::EWtStObstructed, this);
1.103 + pC->iWaitLink.iPriority = pC->iPriority;
1.104 + iWaitQ.Add(&pC->iWaitLink);
1.105 + pC->RelSLock();
1.106 + if (pH)
1.107 + pH->SetMutexPriority(this);
1.108 +do_pause:
1.109 + iMutexLock.UnlockOnly();
1.110 + RescheduleNeeded();
1.111 +#ifdef BTRACE_FAST_MUTEX
1.112 + BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexBlock, this);
1.113 +#endif
1.114 + NKern::PreemptionPoint(); // we block here until the mutex is released and we are 'nominated' for it or we are suspended/killed
1.115 + iMutexLock.LockOnly();
1.116 + pC->AcqSLock();
1.117 + if (pC->iPauseCount || pC->iSuspended || pC->iCsFunction==NThreadBase::ECSDivertPending || (pC->iParent->iCpuChange && !pC->iParent->iFreezeCpu))
1.118 + {
1.119 + pC->RelSLock();
1.120 + goto do_pause; // let pause/suspend/kill take effect
1.121 + }
1.122 + // if thread was suspended it will have been removed from the wait queue
1.123 + if (!pC->iLinkedObj)
1.124 + goto thread_suspended;
1.125 + iWaitQ.Remove(&pC->iWaitLink); // take ourselves off the wait/contend queue while we try to grab the mutex
1.126 + pC->iWaitLink.iNext = 0;
1.127 + pC->iLinkedObj = 0;
1.128 + pC->iLinkedObjType = NThreadBase::EWaitNone;
1.129 + waited = TRUE;
1.130 + // if we are suspended or killed, we loop round again and do the 'else' clause next time
1.131 + }
1.132 + else
1.133 + {
1.134 + pC->RelSLock();
1.135 + if (pC->iSuspended || pC->iCsFunction==NThreadBase::ECSDivertPending)
1.136 + {
1.137 + // wake up next thread to take this one's place
1.138 + if (!pH && !iWaitQ.IsEmpty())
1.139 + {
1.140 + NThreadBase* pT = _LOFF(iWaitQ.First(), NThreadBase, iWaitLink);
1.141 + pT->AcqSLock();
1.142 + // if thread is still blocked on this fast mutex, release it but leave it on the wait queue
1.143 + // NOTE: it can't be suspended
1.144 + pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, this, KErrNone);
1.145 + pT->RelSLock();
1.146 + }
1.147 + }
1.148 + iMutexLock.UnlockOnly();
1.149 + NKern::PreemptionPoint(); // thread suspends/dies/migrates here
1.150 + iMutexLock.LockOnly();
1.151 + pC->AcqSLock();
1.152 +thread_suspended:
1.153 + waited = FALSE;
1.154 + // set contention flag to make sure any other thread must acquire the mutex spin lock
1.155 + // need to do it again since mutex may have been released while thread was suspended
1.156 + __e32_atomic_ior_rlx_ptr(&iHoldingThread, 1);
1.157 + }
1.158 + }
1.159 + }
1.160 +
1.161 +
1.162 +#ifndef __FAST_MUTEX_MACHINE_CODED__
1.163 +/** Releases a previously acquired fast mutex.
1.164 +
1.165 + Generally, threads would use NKern::FMSignal() which manipulates the kernel lock
1.166 + for you.
1.167 +
1.168 + @pre The calling thread holds the mutex.
1.169 + @pre Kernel must be locked.
1.170 +
1.171 + @post Kernel is locked.
1.172 +
1.173 + @see NFastMutex::Wait()
1.174 + @see NKern::FMSignal()
1.175 +*/
1.176 +EXPORT_C void NFastMutex::Signal()
1.177 + {
1.178 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NFastMutex::Signal");
1.179 +#ifdef BTRACE_FAST_MUTEX
1.180 + BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, this);
1.181 +#endif
1.182 + NThreadBase* pC = NCurrentThreadL();
1.183 + ((volatile TUint32&)pC->iHeldFastMutex) |= 1; // flag to indicate about to release mutex
1.184 +
1.185 + if (__e32_atomic_cas_rel_ptr(&iHoldingThread, &pC, 0))
1.186 + {
1.187 + // tricky if suspend/kill here
1.188 + // suspend/kill should check flag set above and aMutex->iHoldingThread
1.189 + // if bit 0 of iHeldFastMutex set and iHoldingThread==pC then set iHeldFastMutex=0 and proceed
1.190 +
1.191 + // no-one else was waiting for the mutex - simple
1.192 + pC->iHeldFastMutex = 0;
1.193 + return;
1.194 + }
1.195 +
1.196 + // there was contention so do it the hard way
1.197 + DoSignalL();
1.198 + }
1.199 +#endif
1.200 +
1.201 +void NFastMutex::DoSignalL()
1.202 + {
1.203 + NThreadBase* pC = NCurrentThreadL();
1.204 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T FMSignal %M",pC,this));
1.205 + __ASSERT_WITH_MESSAGE_DEBUG(HeldByCurrentThread(),"The calling thread holds the mutex","NFastMutex::Signal");
1.206 +
1.207 + iMutexLock.LockOnly();
1.208 + if (!iWaitQ.IsEmpty())
1.209 + {
1.210 + NThreadBase* pT = _LOFF(iWaitQ.First(), NThreadBase, iWaitLink);
1.211 + pT->AcqSLock();
1.212 +
1.213 + // if thread is still blocked on this fast mutex, release it but leave it on the wait queue
1.214 + // NOTE: it can't be suspended
1.215 + pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, this, KErrNone);
1.216 + pT->RelSLock();
1.217 + iHoldingThread = (NThreadBase*)1; // mark mutex as released but contended
1.218 + }
1.219 + else
1.220 + iHoldingThread = 0; // mark mutex as released and uncontended
1.221 + __KTRACE_OPT(KNKERN,DEBUGPRINT("SiHT=%d",iHoldingThread));
1.222 + pC->AcqSLock();
1.223 + pC->iHeldFastMutex = 0;
1.224 + iMutexLock.UnlockOnly();
1.225 + pC->iMutexPri = 0;
1.226 + if (pC->iPriority != pC->iBasePri)
1.227 + {
1.228 + // lose any inherited priority
1.229 + pC->LoseInheritedPriorityT();
1.230 + }
1.231 + if (TUint32(pC->iTime)==0x80000000u)
1.232 + {
1.233 + pC->iTime = 0;
1.234 + RescheduleNeeded(); // handle deferred timeslicing
1.235 + __KTRACE_OPT(KNKERN,DEBUGPRINT("DTS %T",pC));
1.236 + }
1.237 + if (pC->iFastMutexDefer)
1.238 + {
1.239 + pC->iFastMutexDefer = 0;
1.240 + --pC->iParent->iFreezeCpu;
1.241 + }
1.242 + if (pC->iParent->iCpuChange && !pC->iParent->iFreezeCpu)
1.243 + RescheduleNeeded(); // need to migrate to another CPU
1.244 + if (!pC->iCsCount && pC->iCsFunction)
1.245 + pC->DoCsFunctionT();
1.246 + pC->RelSLock();
1.247 + }
1.248 +
1.249 +
1.250 +/** Checks if the current thread holds this fast mutex
1.251 +
1.252 + @return TRUE if the current thread holds this fast mutex
1.253 + @return FALSE if not
1.254 + @pre Call in thread context.
1.255 +*/
1.256 +EXPORT_C TBool NFastMutex::HeldByCurrentThread()
1.257 + {
1.258 + return (TLinAddr(iHoldingThread)&~1) == (TLinAddr)NKern::CurrentThread();
1.259 + }
1.260 +
1.261 +
1.262 +/** Returns the fast mutex held by the calling thread, if any.
1.263 +
1.264 + @return If the calling thread currently holds a fast mutex, this function
1.265 + returns a pointer to it; otherwise it returns NULL.
1.266 + @pre Call in thread context.
1.267 +*/
1.268 +EXPORT_C NFastMutex* NKern::HeldFastMutex()
1.269 + {
1.270 + NThreadBase* t = NKern::CurrentThread();
1.271 + NFastMutex* m = (NFastMutex*)(TLinAddr(t->iHeldFastMutex)&~3);
1.272 + return (m && m->HeldByCurrentThread()) ? m : 0;
1.273 + }
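+
+// Illustrative usage sketch (not part of the original source): checking the
+// "no fast mutex held" precondition in debug builds before waiting on another
+// synchronisation object. 'MyLock' is a hypothetical NFastMutex.
+//
+//    __NK_ASSERT_DEBUG(!NKern::HeldFastMutex());
+//    NKern::FMWait(&MyLock);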
1.274 +
1.275 +
1.276 +#ifndef __FAST_MUTEX_MACHINE_CODED__
1.277 +/** Acquires a fast mutex.
1.278 +
1.279 + This will block until the mutex is available, and causes
1.280 + the thread to enter an implicit critical section until the mutex is released.
1.281 +
1.282 + @param aMutex The fast mutex to acquire.
1.283 +
1.284 + @post The calling thread holds the mutex.
1.285 +
1.286 + @see NFastMutex::Wait()
1.287 + @see NKern::FMSignal()
1.288 +
1.289 + @pre No fast mutex can be held.
1.290 + @pre Call in a thread context.
1.291 + @pre Kernel must be unlocked
1.292 + @pre interrupts enabled
1.293 +
1.294 +*/
1.295 +EXPORT_C void NKern::FMWait(NFastMutex* aMutex)
1.296 + {
1.297 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFMW %M", aMutex));
1.298 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::FMWait");
1.299 + NThreadBase* pC = NKern::CurrentThread();
1.300 +
1.301 + // If the reschedule IPI from an external suspend or kill occurs after this
1.302 + // point the initiating CPU must observe the write to iHeldFastMutex before
1.303 + // the cas operation.
1.304 + pC->iHeldFastMutex = aMutex; // kill/suspend after this point should set mutex contention flag
1.305 + NThreadBase* expect = 0;
1.306 + if (__e32_atomic_cas_acq_ptr(&aMutex->iHoldingThread, &expect, pC))
1.307 + {
1.308 + // mutex was free and we have just claimed it - simple
1.309 +#ifdef BTRACE_FAST_MUTEX
1.310 + BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexWait, aMutex);
1.311 +#endif
1.312 + return;
1.313 + }
1.314 +
1.315 + // care required if suspend/kill here
1.316 +
1.317 + // there is contention so do it the hard way
1.318 + NKern::Lock();
1.319 + aMutex->DoWaitL();
1.320 + NKern::Unlock();
1.321 + }
1.322 +
1.323 +
1.324 +/** Releases a previously acquired fast mutex.
1.325 +
1.326 + @param aMutex The fast mutex to release.
1.327 +
1.328 + @pre The calling thread holds the mutex.
1.329 +
1.330 + @see NFastMutex::Signal()
1.331 + @see NKern::FMWait()
1.332 +*/
1.333 +EXPORT_C void NKern::FMSignal(NFastMutex* aMutex)
1.334 + {
1.335 + NThreadBase* pC = NKern::CurrentThread();
1.336 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFMS %M", aMutex));
1.337 +#ifdef BTRACE_FAST_MUTEX
1.338 + BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexSignal, aMutex);
1.339 +#endif
1.340 + ((volatile TUint32&)pC->iHeldFastMutex) |= 1; // flag to indicate about to release mutex
1.341 +
1.342 + if (__e32_atomic_cas_rel_ptr(&aMutex->iHoldingThread, &pC, 0))
1.343 + {
1.344 + // no-one else was waiting for the mutex and we have just released it
1.345 +
1.346 + // tricky if suspend/kill here
1.347 + // suspend/kill should check flag set above and aMutex->iHoldingThread
1.348 + // if bit 0 of iHeldFastMutex set and iHoldingThread==pC then set iHeldFastMutex=0 and proceed
1.349 +
1.350 + // If the reschedule IPI from an external suspend or kill occurs after this
1.351 + // point the initiating CPU must observe the write to iHeldFastMutex after
1.352 + // the cas operation.
1.353 + pC->iHeldFastMutex = 0;
1.354 + return;
1.355 + }
1.356 +
1.357 + // there was contention so do it the hard way
1.358 + NKern::Lock();
1.359 + aMutex->DoSignalL();
1.360 + NKern::Unlock();
1.361 + }
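+
+// Illustrative usage sketch (not part of the original source): guarding shared
+// state with a fast mutex from thread context. 'MyLock' and 'MyCounter' are
+// hypothetical names used only for this example.
+//
+//    NFastMutex MyLock;
+//    TInt MyCounter;
+//
+//    void IncrementCounter()
+//        {
+//        NKern::FMWait(&MyLock);      // block until free; enters implicit critical section
+//        ++MyCounter;                 // safe to touch shared state here
+//        NKern::FMSignal(&MyLock);    // release; any deferred suspend/kill now takes effect
+//        }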
1.362 +
1.363 +/** Acquires the System Lock.
1.364 +
1.365 + This will block until the mutex is available, and causes
1.366 + the thread to enter an implicit critical section until the mutex is released.
1.367 +
1.368 + @post System lock is held.
1.369 +
1.370 + @see NKern::UnlockSystem()
1.371 + @see NKern::FMWait()
1.372 +
1.373 + @pre No fast mutex can be held.
1.374 + @pre Call in a thread context.
1.375 + @pre Kernel must be unlocked
1.376 + @pre interrupts enabled
1.377 +
1.378 +*/
1.379 +EXPORT_C void NKern::LockSystem()
1.380 + {
1.381 + NKern::FMWait(&TheScheduler.iLock);
1.382 + }
1.383 +
1.384 +
1.385 +/** Releases the System Lock.
1.386 +
1.387 + @pre System lock must be held.
1.388 +
1.389 + @see NKern::LockSystem()
1.390 + @see NKern::FMSignal()
1.391 +*/
1.392 +EXPORT_C void NKern::UnlockSystem()
1.393 + {
1.394 + NKern::FMSignal(&TheScheduler.iLock);
1.395 + }
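+
+// Illustrative usage sketch (not part of the original source): the usual
+// pattern for a short section of code that must run under the System Lock.
+//
+//    NKern::LockSystem();
+//    // ... examine or update state protected by the System Lock ...
+//    NKern::UnlockSystem();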
1.396 +
1.397 +
1.398 +/** Temporarily releases a fast mutex if there is contention.
1.399 +
1.400 + If there is another thread attempting to acquire the mutex, the calling
1.401 + thread releases the mutex and then acquires it again.
1.402 +
1.403 + This is more efficient than the equivalent code:
1.404 +
1.405 + @code
1.406 + NKern::FMSignal();
1.407 + NKern::FMWait();
1.408 + @endcode
1.409 +
1.410 + @return TRUE if the mutex was relinquished, FALSE if not.
1.411 +
1.412 + @pre The mutex must be held.
1.413 +
1.414 + @post The mutex is held.
1.415 +*/
1.416 +EXPORT_C TBool NKern::FMFlash(NFastMutex* aM)
1.417 + {
1.418 + NThreadBase* pC = NKern::CurrentThread();
1.419 + __ASSERT_WITH_MESSAGE_DEBUG(aM->HeldByCurrentThread(),"The calling thread holds the mutex","NKern::FMFlash");
1.420 + TBool w = (pC->iMutexPri >= pC->iBasePri); // a thread of greater or equal priority is waiting
1.421 + if (w)
1.422 + {
1.423 + NKern::Lock();
1.424 + aM->Signal();
1.425 + NKern::PreemptionPoint();
1.426 + aM->Wait();
1.427 + NKern::Unlock();
1.428 + }
1.429 +#ifdef BTRACE_FAST_MUTEX
1.430 + else
1.431 + {
1.432 + BTraceContext4(BTrace::EFastMutex, BTrace::EFastMutexFlash, aM);
1.433 + }
1.434 +#endif
1.435 + return w;
1.436 + }
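+
+// Illustrative usage sketch (not part of the original source): offering the
+// mutex to waiters of greater or equal priority during a long operation.
+// 'MyLock', 'KItems' and 'ProcessItem' are hypothetical.
+//
+//    NKern::FMWait(&MyLock);
+//    for (TInt i=0; i<KItems; ++i)
+//        {
+//        ProcessItem(i);              // work done while holding the mutex
+//        NKern::FMFlash(&MyLock);     // release/reacquire only if a waiter of >= priority exists
+//        }
+//    NKern::FMSignal(&MyLock);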
1.437 +
1.438 +
1.439 +/** Temporarily releases the System Lock if there is contention.
1.440 +
1.441 + If there is another thread attempting to acquire the System lock, the calling
1.442 + thread releases the mutex and then acquires it again.
1.444 +
1.445 + This is more efficient than the equivalent code:
1.446 +
1.447 + @code
1.448 + NKern::UnlockSystem();
1.449 + NKern::LockSystem();
1.450 + @endcode
1.451 +
1.452 + Note that this can only allow higher priority threads to use the System
1.453 + lock, as lower priority threads cannot cause contention on a fast mutex.
1.454 +
1.455 + @return TRUE if the system lock was relinquished, FALSE if not.
1.456 +
1.457 + @pre System lock must be held.
1.458 +
1.459 + @post System lock is held.
1.460 +
1.461 + @see NKern::LockSystem()
1.462 + @see NKern::UnlockSystem()
1.463 +*/
1.464 +EXPORT_C TBool NKern::FlashSystem()
1.465 + {
1.466 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"NKern::FlashSystem");
1.467 + return NKern::FMFlash(&TheScheduler.iLock);
1.468 + }
1.469 +#endif
1.470 +
1.471 +/******************************************************************************
1.472 + * Fast semaphore
1.473 + ******************************************************************************/
1.474 +
1.475 +/** Sets the owner of a fast semaphore.
1.476 +
1.477 + @param aThread The thread to own this semaphore. If aThread==0, then the
1.478 + owner is set to the current thread.
1.479 +
1.480 + @pre Kernel must be locked.
1.481 + @pre If changing ownership from one thread to another, there must be no
1.482 + pending signals or waits.
1.483 + @pre Call either in a thread or an IDFC context.
1.484 +
1.485 + @post Kernel is locked.
1.486 +*/
1.487 +EXPORT_C void NFastSemaphore::SetOwner(NThreadBase* aThread)
1.488 + {
1.489 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SetOwner");
1.490 + if (!aThread)
1.491 + aThread = NCurrentThreadL();
1.492 + if (iOwningThread && iOwningThread!=aThread)
1.493 + {
1.494 + __NK_ASSERT_ALWAYS(!iCount); // Can't change owner if iCount!=0
1.495 + }
1.496 + iOwningThread = aThread;
1.497 + }
1.498 +
1.499 +
1.500 +#ifndef __FAST_SEM_MACHINE_CODED__
1.501 +/** Waits on a fast semaphore.
1.502 +
1.503 + Decrements the signal count for the semaphore and
1.504 + removes the calling thread from the ready-list if the semaphore becomes
1.505 + unsignalled. Only the thread that owns a fast semaphore can wait on it.
1.506 +
1.507 + Note that this function does not block, it merely updates the NThread state,
1.508 + rescheduling will only occur when the kernel is unlocked. Generally threads
1.509 + would use NKern::FSWait() which manipulates the kernel lock for you.
1.510 +
1.511 + @pre The calling thread must own the semaphore.
1.512 + @pre No fast mutex can be held.
1.513 + @pre Kernel must be locked.
1.514 +
1.515 + @post Kernel is locked.
1.516 +
1.517 + @see NFastSemaphore::Signal()
1.518 + @see NKern::FSWait()
1.519 + @see NKern::Unlock()
1.520 + */
1.521 +EXPORT_C void NFastSemaphore::Wait()
1.522 + {
1.523 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NO_FAST_MUTEX,"NFastSemaphore::Wait");
1.524 + NThreadBase* pC = NCurrentThreadL();
1.525 + __ASSERT_WITH_MESSAGE_ALWAYS(pC==iOwningThread,"The calling thread must own the semaphore","NFastSemaphore::Wait");
1.526 + pC->iWaitState.SetUpWait(NThreadBase::EWaitFastSemaphore, 0, this);
1.527 + if (Dec(pC)) // full barrier
1.528 + pC->iWaitState.CancelWait(); // don't have to wait
1.529 + else
1.530 + RescheduleNeeded(); // have to wait
1.531 + }
1.532 +
1.533 +
1.534 +/** Signals a fast semaphore.
1.535 +
1.536 + Increments the signal count of a fast semaphore by
1.537 + one and releases any waiting thread if the semaphore becomes signalled.
1.538 +
1.539 + Note that a reschedule will not occur before this function returns, this will
1.540 + only take place when the kernel is unlocked. Generally threads
1.541 + would use NKern::FSSignal() which manipulates the kernel lock for you.
1.542 +
1.543 + @pre Kernel must be locked.
1.544 + @pre Call either in a thread or an IDFC context.
1.545 +
1.546 + @post Kernel is locked.
1.547 +
1.548 + @see NFastSemaphore::Wait()
1.549 + @see NKern::FSSignal()
1.550 + @see NKern::Unlock()
1.551 + */
1.552 +EXPORT_C void NFastSemaphore::Signal()
1.553 + {
1.554 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Signal");
1.555 + NThreadBase* t = Inc(1); // release semantics
1.556 + if (t)
1.557 + {
1.558 + t->AcqSLock();
1.559 + t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
1.560 + t->RelSLock();
1.561 + }
1.562 + }
1.563 +
1.564 +
1.565 +/** Signals a fast semaphore multiple times.
1.566 +
1.567 + @pre Kernel must be locked.
1.568 + @pre Call either in a thread or an IDFC context.
1.569 +
1.570 + @post Kernel is locked.
1.571 +
1.572 + @internalComponent
1.573 + */
1.574 +EXPORT_C void NFastSemaphore::SignalN(TInt aCount)
1.575 + {
1.576 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::SignalN");
1.577 + __NK_ASSERT_DEBUG(aCount>=0);
1.578 + if (aCount > 0)
1.579 + {
1.580 + NThreadBase* t = Inc(aCount);
1.581 + if (t)
1.582 + {
1.583 + t->AcqSLock();
1.584 + t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
1.585 + t->RelSLock();
1.586 + }
1.587 + }
1.588 + }
1.589 +
1.590 +
1.591 +/** Cancels a wait on a fast semaphore.
1.592 +
1.593 + @pre Kernel must be locked.
1.594 + @pre Call either in a thread or an IDFC context.
1.595 +
1.596 + @post Kernel is locked.
1.597 +
1.598 + @internalComponent
1.599 + */
1.600 +void NFastSemaphore::WaitCancel()
1.601 + {
1.602 + Inc(1);
1.603 + }
1.604 +
1.605 +
1.606 +/** Waits for a signal on the current thread's I/O semaphore.
1.607 +
1.608 + @pre No fast mutex can be held.
1.609 + @pre Call in a thread context.
1.610 + @pre Kernel must be unlocked
1.611 + @pre interrupts enabled
1.612 + */
1.613 +EXPORT_C void NKern::WaitForAnyRequest()
1.614 + {
1.615 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::WaitForAnyRequest");
1.616 + __KTRACE_OPT(KNKERN,DEBUGPRINT("WfAR"));
1.617 + NThreadBase* t = NKern::LockC();
1.618 + NFastSemaphore* s = &t->iRequestSemaphore;
1.619 + t->iWaitState.SetUpWait(NThreadBase::EWaitFastSemaphore, 0, s);
1.620 + if (s->Dec(t)) // fully ordered semantics
1.621 + t->iWaitState.CancelWait(); // don't have to wait
1.622 + else
1.623 + RescheduleNeeded(); // have to wait
1.624 + NKern::Unlock();
1.625 + }
1.626 +#endif
1.627 +
1.628 +
1.629 +/** Resets a fast semaphore.
1.630 +
1.631 + @pre Kernel must be locked.
1.632 + @pre Call either in a thread or an IDFC context.
1.633 +
1.634 + @post Kernel is locked.
1.635 +
1.636 + @internalComponent
1.637 + */
1.638 +EXPORT_C void NFastSemaphore::Reset()
1.639 + {
1.640 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NFastSemaphore::Reset");
1.641 + NThreadBase* t = DoReset();
1.642 + if (t)
1.643 + {
1.644 + t->AcqSLock();
1.645 + t->iWaitState.UnBlockT(NThreadBase::EWaitFastSemaphore, this, KErrNone);
1.646 + t->RelSLock();
1.647 + }
1.648 + }
1.649 +
1.650 +
1.651 +/** Sets the owner of a fast semaphore.
1.652 +
1.653 + @param aSem The semaphore to change ownership of.
1.654 + @param aThread The thread to own this semaphore. If aThread==0, then the
1.655 + owner is set to the current thread.
1.656 +
1.657 + @pre If changing ownership from one thread to another, there must be no
1.658 + pending signals or waits.
1.659 +*/
1.660 +EXPORT_C void NKern::FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread)
1.661 + {
1.662 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::FSSetOwner %m %T",aSem,aThread));
1.663 + NKern::Lock();
1.664 + aSem->SetOwner(aThread);
1.665 + NKern::Unlock();
1.666 + }
1.667 +
1.668 +#ifndef __FAST_SEM_MACHINE_CODED__
1.669 +/** Waits on a fast semaphore.
1.670 +
1.671 + Decrements the signal count for the semaphore
1.672 + and waits for a signal if the semaphore becomes unsignalled. Only the
1.673 + thread that owns a fast semaphore can wait on it.
1.674 +
1.675 + @param aSem The semaphore to wait on.
1.676 +
1.677 + @pre The calling thread must own the semaphore.
1.678 + @pre No fast mutex can be held.
1.679 +
1.680 + @see NFastSemaphore::Wait()
1.681 +*/
1.682 +EXPORT_C void NKern::FSWait(NFastSemaphore* aSem)
1.683 + {
1.684 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSW %m",aSem));
1.685 + NKern::Lock();
1.686 + aSem->Wait();
1.687 + NKern::Unlock();
1.688 + }
1.689 +
1.690 +
1.691 +/** Signals a fast semaphore.
1.692 +
1.693 + Increments the signal count of a fast semaphore
1.694 + by one and releases any waiting thread if the semaphore becomes signalled.
1.695 +
1.696 + @param aSem The semaphore to signal.
1.697 +
1.698 + @see NKern::FSWait()
1.699 +
1.700 + @pre Interrupts must be enabled.
1.701 + @pre Do not call from an ISR
1.702 + */
1.703 +EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem)
1.704 + {
1.705 + CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignal(NFastSemaphore*)");
1.706 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSS %m",aSem));
1.707 + NKern::Lock();
1.708 + aSem->Signal();
1.709 + NKern::Unlock();
1.710 + }
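+
+// Illustrative usage sketch (not part of the original source): a private
+// wakeup between two nanothreads. 'MySem' is hypothetical and must be owned
+// by the waiting thread (see NKern::FSSetOwner).
+//
+//    // waiting (owning) thread
+//    NKern::FSWait(&MySem);       // blocks if the semaphore is not signalled
+//
+//    // signalling thread
+//    NKern::FSSignal(&MySem);     // wakes the owner if it is waiting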
1.711 +
1.712 +
1.713 +/** Signals a fast semaphore multiple times.
1.714 +
1.715 + Increments the signal count of a
1.716 + fast semaphore by aCount and releases any waiting thread if the semaphore
1.717 + becomes signalled.
1.718 +
1.719 + @param aSem The semaphore to signal.
1.720 + @param aCount The number of times to signal the semaphore.
1.721 +
1.722 + @see NKern::FSWait()
1.723 +
1.724 + @pre Interrupts must be enabled.
1.725 + @pre Do not call from an ISR
1.726 + */
1.727 +EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount)
1.728 + {
1.729 + CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::FSSignalN(NFastSemaphore*, TInt)");
1.730 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSSN %m %d",aSem,aCount));
1.731 + __NK_ASSERT_DEBUG(aCount>=0);
1.732 + if (aCount == 0)
1.733 + return;
1.734 + NKern::Lock();
1.735 + aSem->SignalN(aCount);
1.736 + NKern::Unlock();
1.737 + }
1.738 +
1.739 +
1.740 +/** Signals the request semaphore of a nanothread.
1.741 +
1.742 + This function is intended to be used by the EPOC layer and personality
1.743 + layers. Device drivers should use Kern::RequestComplete instead.
1.744 +
1.745 + @param aThread Nanothread to signal. Must be non NULL.
1.746 +
1.747 + @see Kern::RequestComplete()
1.748 +
1.749 + @pre Interrupts must be enabled.
1.750 + @pre Do not call from an ISR
1.751 + */
1.752 +EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread)
1.753 + {
1.754 + NKern::FSSignal(&aThread->iRequestSemaphore);
1.755 + }
1.756 +
1.757 +
1.758 +/** Signals the request semaphore of a nanothread several times.
1.759 +
1.760 + This function is intended to be used by the EPOC layer and personality
1.761 + layers. Device drivers should use Kern::RequestComplete instead.
1.762 +
1.763 + @param aThread Nanothread to signal. If NULL, the current thread is signaled.
1.764 + @param aCount Number of times the request semaphore must be signaled.
1.765 +
1.766 + @pre aCount >= 0
1.767 +
1.768 + @see Kern::RequestComplete()
1.769 + */
1.770 +EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, TInt aCount)
1.771 + {
1.772 + __ASSERT_WITH_MESSAGE_DEBUG(aCount >= 0,"aCount >= 0","NKern::ThreadRequestSignal");
1.773 + if (!aThread)
1.774 + aThread = (NThread*)NKern::CurrentThread();
1.775 + NKern::FSSignalN(&aThread->iRequestSemaphore, aCount);
1.776 + }
1.777 +#endif
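+
+// Illustrative usage sketch (not part of the original source): the standard
+// request/complete handshake on a nanothread's request semaphore. 'pT' and
+// 'QueueRequest' are hypothetical.
+//
+//    // requesting thread
+//    QueueRequest();                  // hand work to the completing side somehow
+//    NKern::WaitForAnyRequest();      // block until the request semaphore is signalled
+//
+//    // completing thread (EPOC layer / personality layer)
+//    NKern::ThreadRequestSignal(pT);  // wake the requesting nanothread pT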
1.778 +
1.779 +
1.780 +
1.781 +/** Atomically signals a fast semaphore and releases a fast mutex.
1.782 +
1.783 + Rescheduling only occurs after both synchronisation operations are complete.
1.784 +
1.785 + @param aSem The semaphore to signal.
1.786 + @param aMutex The mutex to release. If NULL, the System Lock is released
1.787 +
1.788 + @pre The calling thread must hold the mutex.
1.789 +
1.790 + @see NKern::FMSignal()
1.791 + */
1.792 +EXPORT_C void NKern::FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex)
1.793 + {
1.794 + if (!aMutex)
1.795 + aMutex=&TheScheduler.iLock;
1.796 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSS %m +FM %M",aSem,aMutex));
1.797 + NKern::Lock();
1.798 + aSem->Signal();
1.799 + aMutex->Signal();
1.800 + NKern::Unlock();
1.801 + }
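+
+// Illustrative usage sketch (not part of the original source): waking a client
+// and releasing the System Lock atomically, so no reschedule can occur between
+// the two operations. 'ClientSem' is hypothetical.
+//
+//    NKern::LockSystem();
+//    // ... update state the client will examine when it wakes ...
+//    NKern::FSSignal(&ClientSem, NULL);   // signal, then release the System Lock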
1.802 +
1.803 +
1.804 +/** Atomically signals a fast semaphore multiple times and releases a fast mutex.
1.805 +
1.806 + Rescheduling only occurs after both synchronisation operations are complete.
1.807 +
1.808 + @param aSem The semaphore to signal.
1.809 + @param aCount The number of times to signal the semaphore.
1.810 + @param aMutex The mutex to release. If NULL, the System Lock is released.
1.811 +
1.812 + @pre The calling thread must hold the mutex.
1.813 +
1.814 + @see NKern::FMSignal()
1.815 + */
1.816 +EXPORT_C void NKern::FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex)
1.817 + {
1.818 + if (!aMutex)
1.819 + aMutex=&TheScheduler.iLock;
1.820 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NFSSN %m %d + FM %M",aSem,aCount,aMutex));
1.821 + NKern::Lock();
1.822 + aSem->SignalN(aCount);
1.823 + aMutex->Signal();
1.824 + NKern::Unlock();
1.825 + }
1.826 +
1.827 +
1.828 +/******************************************************************************
1.829 + * Thread
1.830 + ******************************************************************************/
1.831 +
1.832 +void NThreadBase::DoCsFunctionT()
1.833 + {
1.834 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nDoCsFuncT %d",this,iCsFunction));
1.835 + TInt f=iCsFunction;
1.836 + if (f==0)
1.837 + return;
1.838 + if (f>0)
1.839 + {
1.840 + // suspend this thread f times
1.841 + iCsFunction = 0;
1.842 + iSuspendCount += f;
1.843 + iSuspended = 1;
1.844 + RescheduleNeeded();
1.845 + return;
1.846 + }
1.847 + if (f==ECSExitPending || f==ECSDivertPending)
1.848 + {
1.849 + // We need to exit now
1.850 + RelSLock();
1.851 + Exit(); // this won't return
1.852 + }
1.853 +// UnknownState(ELeaveCS,f); // call into RTOS personality
1.854 + __NK_ASSERT_ALWAYS(0);
1.855 + }
1.856 +
1.857 +TBool NThreadBase::DoSuspendOrKillT(TInt aCount, TSubScheduler* aS)
1.858 + {
1.859 + TBool result = TRUE;
1.860 + if (aCount>=0)
1.861 + {
1.862 + if (iSuspended)
1.863 + result = FALSE;
1.864 + iSuspendCount+=aCount;
1.865 + iSuspended = 1;
1.866 + if (!iCurrent)
1.867 + {
1.868 + if (aS)
1.869 + UnReadyT();
1.870 + else if (iReady)
1.871 + {
1.872 + NThreadGroup* g = (NThreadGroup*)iParent;
1.873 + g->iNThreadList.Remove(this);
1.874 + }
1.875 + }
1.876 + if (this == NCurrentThreadL())
1.877 + RescheduleNeeded();
1.878 + if (aS)
1.879 + aS->iReadyListLock.UnlockOnly();
1.880 + }
1.881 + else
1.882 + {
1.883 + iCsFunction = ECSDivertPending;
1.884 + iSuspendCount = 0;
1.885 + iSuspended = 0;
1.886 + if (aS)
1.887 + aS->iReadyListLock.UnlockOnly();
1.888 + DoReleaseT(KErrDied,0);
1.889 + if (!iReady && !iPauseCount)
1.890 + ReadyT(0);
1.891 + }
1.892 + return result;
1.893 + }
1.894 +
1.895 +// If aCount>=0 suspend the thread aCount times
1.896 +// If aCount<0 kill the thread
1.897 +TBool NThreadBase::SuspendOrKill(TInt aCount)
1.898 + {
1.899 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSuspendOrKill %d", this, aCount));
1.900 + if (aCount==0)
1.901 + return FALSE;
1.902 + TBool result = FALSE;
1.903 + TBool concurrent = FALSE;
1.904 + TSubScheduler* ss = 0;
1.905 + AcqSLock();
1.906 + NFastMutex* wfm = 0;
1.907 + if (iLinkedObj && iLinkedObjType==EWaitFastMutex)
1.908 + wfm = (NFastMutex*)iLinkedObj;
1.909 + if (iCsFunction<0)
1.910 + goto done2; // if already exiting ignore suspend or kill
1.911 + if (wfm)
1.912 + {
1.913 + // if thread is waiting on a fast mutex, need to acquire mutex lock
1.914 + ++iPauseCount;
1.915 + RelSLock();
1.916 + wfm->iMutexLock.LockOnly();
1.917 + AcqSLock();
1.918 + UnPauseT();
1.919 + }
1.920 + if (iReady && iParent->iReady)
1.921 + {
1.922 + ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
1.923 + ss->iReadyListLock.LockOnly();
1.924 + }
1.925 + concurrent = (iCurrent && this!=NCurrentThreadL());
1.926 + if (iWaitState.ThreadIsDead()) // already dead so suspension/kill is a no-op
1.927 + goto done;
1.928 + if (concurrent)
1.929 + {
1.930 + // thread is actually running on another CPU
1.931 + // interrupt that CPU and wait for it to enter interrupt mode
1.932 + // this allows a snapshot of the thread state to be observed
1.933 + // in this state, the thread cannot enter or leave a critical section
1.934 + send_resched_ipi_and_wait(iLastCpu);
1.935 + }
1.936 + if (iCsCount)
1.937 + {
1.938 +suspend_or_kill_in_cs:
1.939 + __KTRACE_OPT(KNKERN,DEBUGPRINT("n Suspend %T (CSF %d) %d",this,iCsFunction,aCount));
1.940 + if (aCount>0) // -ve means thread is about to exit
1.941 + iCsFunction+=aCount; // so thread will suspend itself when it leaves the critical section
1.942 + else
1.943 + iCsFunction = ECSExitPending;
1.944 + goto done;
1.945 + }
1.946 + // iCsCount==0 and it can't become nonzero until we release the thread spin lock
1.947 + // (since threads may not set iCsCount to a nonzero value with the kernel lock held)
1.948 + // Make sure the thread isn't actually about to exit by itself
1.949 + if (iCsFunction<0)
1.950 + goto done; // if already exiting ignore suspend or kill
1.951 + if (wfm)
1.952 + {
1.953 + wfm->iWaitQ.Remove(&iWaitLink); // take thread off the wait/contend queue
1.954 + iWaitLink.iNext = 0;
1.955 + iLinkedObj = 0;
1.956 + iLinkedObjType = EWaitNone;
1.957 + result = DoSuspendOrKillT(aCount, ss);
1.958 + if (aCount>0)
1.959 + DoReleaseT(KErrGeneral, 0); // thread isn't blocked any more, just suspended
1.960 + RelSLock();
1.961 +
1.962 + // May need to adjust holding thread's inherited priority.
1.963 + // May need to wake up next thread to take this one's place.
1.964 + NThreadBase* pH = (NThreadBase*)(TLinAddr(wfm->iHoldingThread) &~ 1);
1.965 + if (pH)
1.966 + pH->SetMutexPriority(wfm);
1.967 + else if (!pH && !wfm->iWaitQ.IsEmpty())
1.968 + {
1.969 + NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);
1.970 + pT->AcqSLock();
1.971 + pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
1.972 + pT->RelSLock();
1.973 + }
1.974 + wfm->iMutexLock.UnlockOnly();
1.975 + return result;
1.976 + }
1.977 + if (CheckFastMutexDefer())
1.978 + goto suspend_or_kill_in_cs;
1.979 +
1.980 + // thread not in critical section, so suspend it
1.981 + result = DoSuspendOrKillT(aCount, ss);
1.982 + goto done2;
1.983 +
1.984 +done:
1.985 + if (wfm)
1.986 + wfm->iMutexLock.UnlockOnly();
1.987 + if (ss)
1.988 + ss->iReadyListLock.UnlockOnly();
1.989 +done2:
1.990 + RelSLock();
1.991 +
1.992 + return result;
1.993 + }
1.994 +
1.995 +
1.996 +/** Suspends a nanothread the specified number of times.
1.997 +
1.998 + For use by RTOS personality layers.
1.999 + Do not use this function directly on a Symbian OS thread.
1.1000 + Since the kernel is locked on entry, any reschedule will be deferred until
1.1001 + it is unlocked.
1.1002 + The suspension will be deferred if the target thread is currently in a
1.1003 + critical section; in this case the suspension will take effect when it exits
1.1004 + the critical section.
1.1005 + The thread's unknown state handler will be invoked with function ESuspend and
1.1006 + parameter aCount if the current NState is not recognised and it is not in a
1.1007 + critical section.
1.1008 +
1.1009 + @param aCount The number of times to suspend.
1.1010 + @return TRUE, if the suspension has taken immediate effect;
1.1011 + FALSE, if the thread is in a critical section or is already suspended.
1.1012 +
1.1013 + @pre Kernel must be locked.
1.1014 + @pre Call in a thread context.
1.1015 +
1.1016 + @post Kernel is locked.
1.1017 + */
1.1018 +EXPORT_C TBool NThreadBase::Suspend(TInt aCount)
1.1019 + {
1.1020 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Suspend");
1.1021 + __NK_ASSERT_ALWAYS(aCount>=0);
1.1022 +
1.1023 + // If thread is executing a critical section, we must defer the suspend
1.1024 +
1.1025 + return SuspendOrKill(aCount);
1.1026 + }
1.1027 +
1.1028 +
1.1029 +TBool NThreadBase::Resume(TBool aForce)
1.1030 + {
1.1031 + TBool result = FALSE;
1.1032 + AcqSLock();
1.1033 + if (iWaitState.ThreadIsDead() || iCsFunction<0) // already dead or dying so resume is a no-op
1.1034 + goto done;
1.1035 +
1.1036 + if (iCsFunction>0)
1.1037 + {
1.1038 + if (aForce)
1.1039 + iCsFunction = 0;
1.1040 + else
1.1041 + --iCsFunction;
1.1042 + }
1.1043 + else if (iSuspendCount)
1.1044 + {
1.1045 + if (aForce)
1.1046 + iSuspendCount = 0;
1.1047 + else
1.1048 + --iSuspendCount;
1.1049 + if (!iSuspendCount)
1.1050 + {
1.1051 + result = TRUE;
1.1052 + iSuspended = 0;
1.1053 + if (!iPauseCount && !iReady && !iWaitState.iWtC.iWtStFlags)
1.1054 + ReadyT(0);
1.1055 + }
1.1056 + }
1.1057 +
1.1058 +done:
1.1059 + RelSLock();
1.1060 + return result;
1.1061 + }
1.1062 +
1.1063 +/** Resumes a nanothread, cancelling one suspension.
1.1064 +
1.1065 + For use by RTOS personality layers.
1.1066 + Do not use this function directly on a Symbian OS thread.
1.1067 + Since the kernel is locked on entry, any reschedule will be deferred until
1.1068 + it is unlocked.
1.1069 + If the target thread is currently in a critical section this will simply
1.1070 + cancel one deferred suspension.
1.1071 + The thread's unknown state handler will be invoked with function EResume if
1.1072 + the current NState is not recognised and it is not in a critical section.
1.1073 +
1.1074 + @return TRUE, if the resumption has taken immediate effect;
1.1075 + FALSE, if the thread is in a critical section or is still suspended.
1.1076 +
1.1077 + @pre Kernel must be locked.
1.1078 + @pre Call either in a thread or an IDFC context.
1.1079 +
1.1080 + @post Kernel is locked.
1.1081 + */
1.1082 +EXPORT_C TBool NThreadBase::Resume()
1.1083 + {
1.1084 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NThreadBase::Resume");
1.1085 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nRsm",this));
1.1086 +
1.1087 + return Resume(FALSE);
1.1088 + }
1.1089 +
1.1090 +
1.1091 +/** Resumes a nanothread, cancelling all outstanding suspensions.
1.1092 +
1.1093 + For use by RTOS personality layers.
1.1094 + Do not use this function directly on a Symbian OS thread.
1.1095 + Since the kernel is locked on entry, any reschedule will be deferred until
1.1096 + it is unlocked.
1.1097 + If the target thread is currently in a critical section this will simply
1.1098 + cancel all deferred suspensions.
1.1099 + The thread's unknown state handler will be invoked with function EForceResume
1.1100 + if the current NState is not recognised and it is not in a critical section.
1.1101 +
1.1102 + @return TRUE, if the resumption has taken immediate effect;
1.1103 + FALSE, if the thread is in a critical section.
1.1104 +
1.1105 + @pre Kernel must be locked.
1.1106 + @pre Call either in a thread or an IDFC context.
1.1107 +
1.1108 + @post Kernel is locked.
1.1109 + */
1.1110 +EXPORT_C TBool NThreadBase::ForceResume()
1.1111 + {
1.1112 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::ForceResume");
1.1113 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nFRsm",this));
1.1114 +
1.1115 + return Resume(TRUE);
1.1116 + }
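+
+// Illustrative sketch (not part of the original source): suspensions nest, so
+// a thread suspended N times needs N calls to Resume() (or a single
+// ForceResume()) before it can run again. 't' is a hypothetical NThreadBase*.
+//
+//    NKern::Lock();
+//    t->Suspend(2);       // thread now needs two resumes
+//    t->Resume();         // still suspended
+//    t->Resume();         // last suspension removed - returns TRUE
+//    NKern::Unlock();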
1.1117 +
1.1118 +
1.1119 +void NThreadBase::DoReleaseT(TInt aReturnCode, TUint aMode)
1.1120 + {
1.1121 + TAny* wobj = 0;
1.1122 + TUint32 b = iWaitState.ReleaseT(wobj, aReturnCode); // cancels timer if necessary
1.1123 +
1.1124 + // if wait pending or no wait, done
1.1125 + // if wait in effect and nothing else stopping it, make thread ready
1.1126 + // cancel any outstanding wait on fast semaphore if abnormal release
1.1127 + // FIXME: Potential problems with abnormal release of generic wait objects
1.1128 + if (aReturnCode<0 && ((b>>8)&0xff)==NThreadBase::EWaitFastSemaphore && wobj)
1.1129 + ((NFastSemaphore*)wobj)->WaitCancel();
1.1130 +
1.1131 + if ((b & NThreadWaitState::EWtStWaitActive) && !iPauseCount && !iSuspended)
1.1132 + ReadyT(aMode);
1.1133 + }
1.1134 +
1.1135 +/** Releases a waiting nanokernel thread.
1.1136 +
1.1137 + For use by RTOS personality layers.
1.1138 + Do not use this function directly on a Symbian OS thread.
1.1139 + This function should make the thread ready (provided it is not explicitly
1.1140 + suspended) and cancel any wait timeout. It should also remove it from any
1.1141 + wait queues.
1.1142 + If aReturnCode is nonnegative it indicates normal completion of the wait.
1.1143 + If aReturnCode is negative it indicates early/abnormal completion of the
1.1144 + wait and so any wait object should be reverted as if the wait had never
1.1145 + occurred (eg semaphore count should be incremented as this thread has not
1.1146 + actually acquired the semaphore).
1.1147 + The thread's unknown state handler will be invoked with function ERelease
1.1148 + and parameter aReturnCode if the current NState is not recognised.
1.1149 +
1.1150 + @param aReturnCode The reason code for release.
1.1151 +
1.1152 + @pre Kernel must be locked.
1.1153 + @pre Call either in a thread or an IDFC context.
1.1154 +
1.1155 + @post Kernel is locked.
1.1156 + */
1.1157 +EXPORT_C void NThreadBase::Release(TInt aReturnCode, TUint aMode)
1.1158 + {
1.1159 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::Release");
1.1160 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nRel %d",this,aReturnCode));
1.1161 + AcqSLock();
1.1162 + DoReleaseT(aReturnCode, aMode);
1.1163 + RelSLock();
1.1164 + }
1.1165 +
1.1166 +
1.1167 +/** Signals a nanokernel thread's request semaphore.
1.1168 +
1.1169 + This can also be used on Symbian OS threads.
1.1170 +
1.1171 + @pre Kernel must be locked.
1.1172 + @pre Call either in a thread or an IDFC context.
1.1173 +
1.1174 + @post Kernel is locked.
1.1175 + */
1.1176 +EXPORT_C void NThreadBase::RequestSignal()
1.1177 + {
1.1178 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NThreadBase::RequestSignal");
1.1179 + iRequestSemaphore.Signal();
1.1180 + }
1.1181 +
1.1182 +
1.1183 +void exit_sync_fn(TAny* aDfc)
1.1184 + {
1.1185 + ((TDfc*)aDfc)->Enque();
1.1186 + }
1.1187 +
1.1188 +void NThreadBase::Exit()
1.1189 + {
1.1190 + // The current thread is exiting
1.1191 + // Enter with kernel locked, don't return
1.1192 + __NK_ASSERT_DEBUG(this==NCurrentThreadL());
1.1193 +
1.1194 + OnExit();
1.1195 +
1.1196 + TInt threadCS = iCsCount;
1.1197 + TInt kernCS = SubScheduler().iKernLockCount;
1.1198 + iCsCount = 1;
1.1199 + AcqSLock();
1.1200 + iCsFunction = ECSExitInProgress;
1.1201 + NFastMutex* m = NKern::HeldFastMutex();
1.1202 + iHeldFastMutex = 0;
1.1203 + RelSLock();
1.1204 + NKern::Unlock();
1.1205 + __KTRACE_OPT(KSCHED,DEBUGPRINT("Exit %T %u",this,NTickCount()));
1.1206 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nExit, CSC %d HeldFM %M KernCS %d",this,threadCS,iHeldFastMutex,kernCS));
1.1207 + if (kernCS!=1)
1.1208 + FAULT();
1.1209 + if (m)
1.1210 + FAULT();
1.1211 + if (threadCS)
1.1212 + FAULT();
1.1213 + TDfc* pD = NULL;
1.1214 + NThreadExitHandler xh = iHandlers->iExitHandler;
1.1215 + if (xh)
1.1216 + pD = (*xh)((NThread*)this); // call exit handler
1.1217 +
1.1218 + // detach any tied events
1.1219 + DetachTiedEvents();
1.1220 +
1.1221 + NKern::LeaveGroup(); // detach from group if exit handler didn't do it
1.1222 +
1.1223 + NKern::Lock();
1.1224 +#ifdef BTRACE_THREAD_IDENTIFICATION
1.1225 + BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadDestroy,this);
1.1226 +#endif
1.1227 + __NK_ASSERT_ALWAYS(iCsFunction == ECSExitInProgress);
1.1228 + iWaitState.SetDead(pD); // doesn't return
1.1229 + FAULT();
1.1230 + }
1.1231 +
1.1232 +/** Kills a nanokernel thread.
1.1233 +
1.1234 + For use by RTOS personality layers.
1.1235 + Do not use this function directly on a Symbian OS thread.
1.1236 +
1.1237 + When acting on the calling thread, causes the calling thread to exit.
1.1238 +
1.1239 + When acting on another thread, causes that thread to exit unless it is
1.1240 + currently in a critical section. In this case the thread is marked as
1.1241 + "exit pending" and will exit as soon as it leaves the critical section.
1.1242 +
1.1243 + In either case the exiting thread first invokes its exit handler (if it
1.1244 + exists). The handler runs with preemption enabled and with the thread in a
1.1245 + critical section so that it may not be suspended or killed again. The
1.1246 + handler may return a pointer to a TDfc, which will be enqueued just before
1.1247 + the thread finally terminates (after the kernel has been relocked). This DFC
1.1248 + will therefore execute once the NThread has been safely removed from the
1.1249 + scheduler and is intended to be used to cleanup the NThread object and any
1.1250 + associated personality layer resources.
1.1251 +
1.1252 + @pre Kernel must be locked.
1.1253 + @pre Call in a thread context.
1.1254 + @pre If acting on calling thread, calling thread must not be in a
1.1255 + critical section; if it is the kernel will fault. Also, the kernel
1.1256 + must be locked exactly once (iKernCSLocked = 1).
1.1257 +
1.1258 + @post Kernel is locked, if not acting on calling thread.
1.1259 + @post Does not return if it acts on the calling thread.
1.1260 + */
1.1261 +EXPORT_C void NThreadBase::Kill()
1.1262 + {
1.1263 + // Kill a thread
1.1264 + // Enter with kernel locked
1.1265 + // Exit with kernel locked if not current thread, otherwise does not return
1.1266 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED_ONCE|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::Kill");
1.1267 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nKill",this));
1.1268 + OnKill(); // platform-specific hook
1.1269 + NThreadBase* pC = NCurrentThreadL();
1.1270 + if (this==pC)
1.1271 + {
1.1272 + if (iCsFunction==ECSExitInProgress)
1.1273 + FAULT();
1.1274 + Exit(); // this will not return
1.1275 + }
1.1276 + SuspendOrKill(-1);
1.1277 + }
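+
+// Illustrative sketch (not part of the original source): a personality-layer
+// exit handler, as described above. It runs with preemption enabled and the
+// thread in a critical section, and may return a TDfc* to be enqueued just
+// before the thread finally terminates. 'MyCleanupDfc' is hypothetical.
+//
+//    TDfc* MyExitHandler(NThread* aThread)
+//        {
+//        // release personality-layer resources that are safe to free here
+//        return &MyCleanupDfc;    // queued after the thread is removed from the scheduler
+//        }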
1.1278 +
1.1279 +
1.1280 +/** Change the CPU affinity of a thread
1.1281 +
1.1282 + @pre Kernel must be locked.
1.1283 + @pre Call in a thread context.
1.1284 +
1.1285 + @param aAffinity The number of the CPU to which this thread should be locked, or
1.1286 + KCpuAny if it should be able to run on any CPU.
1.1287 + @return The previous affinity mask.
1.1288 +*/
1.1289 +TUint32 NThreadBase::SetCpuAffinity(TUint32 aAffinity)
1.1290 + {
1.1291 + // check aAffinity is valid
1.1292 + AcqSLock();
1.1293 + TUint32 old_aff = iParent->iCpuAffinity;
1.1294 + TBool migrate = FALSE;
1.1295 + TBool make_ready = FALSE;
1.1296 + TSubScheduler* ss0 = &SubScheduler();
1.1297 + TSubScheduler* ss = 0;
1.1298 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetCpu %08x->%08x, F:%d R:%02x PR:%02x",this,iParent->iCpuAffinity,aAffinity,iParent->iFreezeCpu,iReady,iParent->iReady));
1.1299 + if (i_NThread_Initial)
1.1300 + goto done; // can't change affinity of initial thread
1.1301 + iParent->iCpuAffinity = aAffinity; // set new affinity, might not take effect yet
1.1302 + if (!iParent->iReady)
1.1303 + goto done; // thread/group not currently on a ready list so can just change affinity
1.1304 + migrate = !CheckCpuAgainstAffinity(iParent->iReady & EReadyCpuMask, aAffinity); // TRUE if thread's current CPU is incompatible with the new affinity
1.1305 + if (!migrate)
1.1306 + goto done; // don't need to move thread, so just change affinity
1.1307 + ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
1.1308 + ss->iReadyListLock.LockOnly();
1.1309 + if (iParent->iCurrent)
1.1310 + {
1.1311 + iParent->iCpuChange = TRUE; // mark CPU change pending
1.1312 + if (ss == ss0)
1.1313 + RescheduleNeeded();
1.1314 + else
1.1315 + // kick other CPU now so migration happens before acquisition of fast mutex
1.1316 + send_resched_ipi_and_wait(iParent->iReady & EReadyCpuMask);
1.1317 + }
1.1318 + else
1.1319 + {
1.1320 + // Note: Need to know here if any thread in group would return TRUE from CheckFastMutexDefer()
1.1321 + // This is handled by the scheduler - when a thread belonging to a group is context switched
1.1322 + // out while holding a fast mutex its iFastMutexDefer is set to 1 and the group's iFreezeCpu
1.1323 + // is incremented.
1.1324 + if (iParent->iFreezeCpu || (iParent==this && CheckFastMutexDefer()))
1.1325 + iParent->iCpuChange = TRUE; // CPU frozen or fast mutex held so just mark deferred CPU migration
1.1326 + else
1.1327 + {
1.1328 + ss->Remove(iParent);
1.1329 + iParent->iReady = 0;
1.1330 + make_ready = TRUE;
1.1331 + }
1.1332 + }
1.1333 + ss->iReadyListLock.UnlockOnly();
1.1334 + if (make_ready)
1.1335 + iParent->ReadyT(0);
1.1336 +done:
1.1337 + RelSLock();
1.1338 + return old_aff;
1.1339 + }
1.1340 +
1.1341 +
1.1342 +/******************************************************************************
1.1343 + * Thread wait state
1.1344 + ******************************************************************************/
1.1345 +#ifndef __NTHREAD_WAITSTATE_MACHINE_CODED__
1.1346 +void NThreadWaitState::SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj)
1.1347 + {
1.1348 + SetUpWait(aType, aFlags, aWaitObj, 0);
1.1349 + }
1.1350 +
1.1351 +void NThreadWaitState::SetUpWait(TUint aType, TUint aFlags, TAny* aWaitObj, TUint32 aTimeout)
1.1352 + {
1.1353 + aFlags &= EWtStObstructed;
1.1354 + aFlags |= EWtStWaitPending;
1.1355 + aType &= 0xff;
1.1356 + TUint64 ws64 = (TUint32)aWaitObj;
1.1357 + ws64 <<= 32;
1.1358 + ws64 |= ((aType<<8)|aFlags);
1.1359 + TUint64 oldws64 = __e32_atomic_swp_rlx64(&iWtSt64, ws64);
1.1360 + if (I64LOW(oldws64)!=0)
1.1361 + __crash(); // ??we were already waiting for something else??
1.1362 + iTimer.iTriggerTime = aTimeout;
1.1363 + }
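+
+// Layout of the packed 64-bit wait state set up above (explanatory note
+// inferred from this code, not part of the original source):
+//    bits 0..7     wait flags (EWtStWaitPending / EWtStWaitActive / EWtStTimeout / EWtStObstructed / EWtStDead)
+//    bits 8..15    wait object type (NThreadBase::EWaitFastMutex, EWaitFastSemaphore, ...)
+//    bits 16..31   unused (zero)
+//    bits 32..63   address of the wait object
+// The whole word is exchanged atomically, so a waiter and a releasing thread
+// always see a consistent (flags, type, object) triple.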
1.1364 +
1.1365 +void NThreadWaitState::CancelWait()
1.1366 + {
1.1367 + TUint64 oldws64 = __e32_atomic_swp_rlx64(&iWtSt64, 0);
1.1368 + if (oldws64 & (EWtStDead|EWtStWaitActive))
1.1369 + __crash();
1.1370 + }
1.1371 +
1.1372 +TInt NThreadWaitState::DoWait()
1.1373 + {
1.1374 + TUint64 oldws64 = iWtSt64;
1.1375 + TUint64 ws64;
1.1376 + TUint32 timeout = iTimer.iTriggerTime;
1.1377 + TUint32 set = timeout ? (EWtStWaitActive|EWtStTimeout) : EWtStWaitActive;
1.1378 + do {
1.1379 + TUint32 ws32 = I64LOW(oldws64);
1.1380 + if (ws32 & EWtStDead)
1.1381 + return KErrDied;
1.1382 + if (!(ws32 & EWtStWaitPending))
1.1383 + return KErrGeneral;
1.1384 + ws64 = oldws64;
1.1385 + ws64 &= ~TUint64(EWtStWaitPending);
1.1386 + ws64 |= TUint64(set);
1.1387 + } while(!__e32_atomic_cas_rlx64(&iWtSt64, &oldws64, ws64));
1.1388 + if (timeout)
1.1389 + {
1.1390 + if (iTimer.OneShot(timeout, TRUE)!=KErrNone)
1.1391 + __crash();
1.1392 + ++iTimer.iNTimerSpare1;
1.1393 + }
1.1394 + return TUint32(oldws64)>>8;
1.1395 + }
1.1396 +
1.1397 +TInt NThreadWaitState::UnBlockT(TUint aType, TAny* aWaitObj, TInt aReturnValue)
1.1398 + {
1.1399 + TUint64 exp = TUint32(aWaitObj);
1.1400 + exp <<= 32;
1.1401 + exp |= (aType<<8);
1.1402 + TUint64 oldws64 = iWtSt64;
1.1403 + TUint64 ws64;
1.1404 + do {
1.1405 + if ((oldws64 ^ exp) < TUint64(EWtStDead))
1.1406 + ws64 = TUint64(TUint32(aReturnValue))<<32;
1.1407 + else
1.1408 + ws64 = oldws64;
1.1409 + } while(!__e32_atomic_cas_rel64(&iWtSt64, &oldws64, ws64));
1.1410 + if ((oldws64 ^ exp) >= TUint64(EWtStDead))
1.1411 + return KErrGeneral; // not unblocked - no matching wait
1.1412 + if (oldws64 & EWtStTimeout)
1.1413 + CancelTimerT();
1.1414 + if (oldws64 & EWtStWaitActive)
1.1415 + {
1.1416 + NThreadBase* t = Thread();
1.1417 + if (!t->iPauseCount && !t->iSuspended)
1.1418 + t->ReadyT(0);
1.1419 + }
1.1420 + return KErrNone;
1.1421 + }
1.1422 +
1.1423 +TUint32 NThreadWaitState::ReleaseT(TAny*& aWaitObj, TInt aReturnValue)
1.1424 + {
1.1425 + TUint64 leave = EWtStDead;
1.1426 + TUint64 set = TUint64(TUint32(aReturnValue))<<32;
1.1427 + TUint64 ws64 = __e32_atomic_axo_ord64(&iWtSt64, leave, set);
1.1428 + aWaitObj = (TAny*)I64HIGH(ws64);
1.1429 + TUint32 ws32 = I64LOW(ws64);
1.1430 + if (ws32 & EWtStTimeout)
1.1431 + CancelTimerT();
1.1432 + return ws32;
1.1433 + }
1.1434 +#endif
1.1435 +
1.1436 +void NThreadWaitState::SetDead(TDfc* aKillDfc)
1.1437 + {
1.1438 + TDfc syncDfc(&exit_sync_fn, aKillDfc, TheTimerQ.iDfc.iDfcQ, 0);
1.1439 + NThreadBase* t = Thread();
1.1440 + t->AcqSLock();
1.1441 + iWtC.iWtStFlags = NThreadWaitState::EWtStDead;
1.1442 + iWtC.iWtObjType = NThreadBase::EWaitNone;
1.1443 + CancelTimerT();
1.1444 + if (aKillDfc && iTimer.iNTimerSpare1)
1.1445 + {
1.1446 + // There is an outstanding timer expiry handler still running
1.1447 + // so we must synchronise with DfcThread1.
1.1448 + // Add a priority 0 DFC to DfcThread1 so this thread's exit DFC can
1.1449 + // only run after the timer expiry handler has completed.
1.1450 + aKillDfc = &syncDfc;
1.1451 + }
1.1452 + iWtC.iKillDfc = aKillDfc;
1.1453 + RescheduleNeeded();
1.1454 + t->RelSLock();
1.1455 + NKern::Unlock(); // this won't return
1.1456 + }
1.1457 +
1.1458 +void NThreadWaitState::CancelTimerT()
1.1459 + {
1.1460 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nCancelTimerT ",Thread()));
1.1461 + if (iTimer.Cancel())
1.1462 + --iTimer.iNTimerSpare1;
1.1463 + else
1.1464 + {
1.1465 + // Potential race condition - timer must have completed but expiry
1.1466 + // handler has not yet run. Signal to the handler that it should do
1.1467 + // nothing by flipping the bottom bit of iTimer.iPtr
1.1468 + // This condition cannot possibly recur until the expiry handler has
1.1469 + // run since all expiry handlers run in DfcThread1.
1.1470 + volatile TLinAddr& x = *(volatile TLinAddr*)&iTimer.iPtr;
1.1471 + x ^= 1;
1.1472 + }
1.1473 + }
1.1474 +
1.1475 +// Timeout handler, called in DfcThread1
1.1476 +// NOTE: aPtr is sampled with the timer queue locked, so if Cancel() on the timer fails
1.1477 +// and iTimer.iPtr is then changed, aPtr here will differ from iTimer.iPtr.
1.1478 +// This fact is used here to detect expiry of cancelled timers.
1.1479 +void NThreadWaitState::TimerExpired(TAny* aPtr)
1.1480 + {
1.1481 + TLinAddr cookie = (TLinAddr)aPtr;
1.1482 + NThreadWaitState* pW = (NThreadWaitState*)(cookie &~ 3);
1.1483 + NThread* pT = (NThread*)pW->Thread();
1.1484 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T nTmExp",pT));
1.1485 + NThreadTimeoutHandler th = pT->iHandlers->iTimeoutHandler;
1.1486 + pT->LAcqSLock();
1.1487 + TUint flags = pW->iWtSt32[0];
1.1488 + if (!(flags & EWtStWaitActive) || ((flags>>8)&0xff)!=NThreadBase::EWaitBlocked)
1.1489 + th = 0;
1.1490 + if (th)
1.1491 + {
1.1492 + // Use higher level timeout handler
1.1493 + pT->RelSLockU();
1.1494 + (*th)(pT, NThreadBase::ETimeoutPreamble);
1.1495 + TInt param = NThreadBase::ETimeoutPostamble;
1.1496 + pT->LAcqSLock();
1.1497 + TLinAddr current_cookie = *(volatile TLinAddr*)&pW->iTimer.iPtr;
1.1498 + if ((cookie ^ current_cookie) & 1)
1.1499 + {
1.1500 + // The timer was cancelled just after expiring but before this function
1.1501 + // managed to acquire the thread spin lock, so it's spurious
1.1502 + param = NThreadBase::ETimeoutSpurious;
1.1503 + }
1.1504 + pT->RelSLockU();
1.1505 + (*th)(pT, param);
1.1506 + pT->LAcqSLock();
1.1507 + --pW->iTimer.iNTimerSpare1; // note timer has expired
1.1508 + pT->RelSLockU();
1.1509 + return;
1.1510 + }
1.1511 + TLinAddr current_cookie = *(volatile TLinAddr*)&pW->iTimer.iPtr;
1.1512 + if ((cookie ^ current_cookie) & 1)
1.1513 + // The timer was cancelled just after expiring but before this function
1.1514 + // managed to acquire the thread spin lock, so just return without doing anything.
1.1515 + goto done;
1.1516 + pT->DoReleaseT(KErrTimedOut,0);
1.1517 +done:
1.1518 + pT->RelSLockU();
1.1519 + }
1.1520 +
1.1521 +
1.1522 +
1.1523 +/******************************************************************************
1.1524 + * NKern:: static functions
1.1525 + ******************************************************************************/
1.1526 +
1.1527 +/** Suspends the execution of a thread.
1.1528 +
1.1529 + This function is intended to be used by the EPOC layer and personality layers.
1.1530 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadSuspend().
1.1531 +
1.1532 + If the thread is in a critical section or holds a fast mutex, the suspension will
1.1533 + be deferred until the thread leaves the critical section or signals the fast mutex.
1.1534 + Otherwise the thread will be suspended with immediate effect. If the thread is
1.1535 + currently running, its execution will be suspended and a reschedule will occur.
1.1536 +
1.1537 + @param aThread Thread to be suspended.
1.1538 + @param aCount Number of times to suspend this thread.
1.1539 +
1.1540 + @return TRUE, if the thread's state changed from non-suspended to suspended;
1.1541 + FALSE, otherwise.
1.1542 +
1.1543 + @see Kern::ThreadSuspend()
1.1544 +*/
1.1545 +EXPORT_C TBool NKern::ThreadSuspend(NThread* aThread, TInt aCount)
1.1546 + {
1.1547 + NKern::Lock();
1.1548 + TBool r=aThread->Suspend(aCount);
1.1549 + NKern::Unlock();
1.1550 + return r;
1.1551 + }
1.1552 +
1.1553 +
1.1554 +/** Resumes the execution of a thread.
1.1555 +
1.1556 + This function is intended to be used by the EPOC layer and personality layers.
1.1557 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1.1558 +
1.1559 + This function resumes the thread once. If the thread was suspended more than once
1.1560 + the thread will remain suspended.
1.1561 + If the thread is in a critical section, this function will decrease the number of
1.1562 + deferred suspensions.
1.1563 +
1.1564 + @param aThread Thread to be resumed.
1.1565 +
1.1566 +	@return TRUE if the thread's state changed from suspended to non-suspended;
1.1567 +			FALSE otherwise.
1.1568 +
1.1569 + @see Kern::ThreadResume()
1.1570 +*/
1.1571 +EXPORT_C TBool NKern::ThreadResume(NThread* aThread)
1.1572 + {
1.1573 + NKern::Lock();
1.1574 + TBool r=aThread->Resume();
1.1575 + NKern::Unlock();
1.1576 + return r;
1.1577 + }
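+
+// Usage sketch (illustrative only, not part of the original source): pairing
+// NKern::ThreadSuspend() with NKern::ThreadResume() from a personality layer.
+// 'aWorker' is a hypothetical nanothread created elsewhere.
+//
+//	void ExamplePauseAndResume(NThread* aWorker)
+//		{
+//		TBool suspended = NKern::ThreadSuspend(aWorker, 1);	// deferred if aWorker is in a critical section
+//		// ... inspect or adjust state while the worker is (or is about to be) suspended ...
+//		TBool resumed = NKern::ThreadResume(aWorker);		// undoes a single suspension
+//		(void)suspended; (void)resumed;
+//		}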
1.1578 +
1.1579 +
1.1580 +/** Resumes the execution of a thread and signals a mutex.
1.1581 +
1.1582 + This function is intended to be used by the EPOC layer and personality layers.
1.1583 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1.1584 +
1.1585 + This function resumes the thread once. If the thread was suspended more than once
1.1586 + the thread will remain suspended.
1.1587 + If the thread is in a critical section, this function will decrease the number of
1.1588 + deferred suspensions.
1.1589 +
1.1590 + @param aThread Thread to be resumed.
1.1591 + @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
1.1592 +
1.1593 +	@return TRUE if the thread's state changed from suspended to non-suspended;
1.1594 +			FALSE otherwise.
1.1595 +
1.1596 + @see Kern::ThreadResume()
1.1597 +*/
1.1598 +EXPORT_C TBool NKern::ThreadResume(NThread* aThread, NFastMutex* aMutex)
1.1599 + {
1.1600 + if (!aMutex)
1.1601 + aMutex=&TheScheduler.iLock;
1.1602 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NRsm + FM %M",aThread,aMutex));
1.1603 + NKern::Lock();
1.1604 + TBool r=aThread->Resume();
1.1605 + aMutex->Signal();
1.1606 + NKern::Unlock();
1.1607 + return r;
1.1608 + }
1.1609 +
1.1610 +
1.1611 +/** Forces the execution of a thread to be resumed.
1.1612 +
1.1613 + This function is intended to be used by the EPOC layer and personality layers.
1.1614 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1.1615 +
1.1616 + This function cancels all suspensions on a thread.
1.1617 +
1.1618 + @param aThread Thread to be resumed.
1.1619 +
1.1620 +	@return TRUE if the thread's state changed from suspended to non-suspended;
1.1621 +			FALSE otherwise.
1.1622 +
1.1623 + @see Kern::ThreadResume()
1.1624 +*/
1.1625 +EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread)
1.1626 + {
1.1627 + NKern::Lock();
1.1628 + TBool r=aThread->ForceResume();
1.1629 + NKern::Unlock();
1.1630 + return r;
1.1631 + }
1.1632 +
1.1633 +
1.1634 +/** Forces the execution of a thread to be resumed and signals a mutex.
1.1635 +
1.1636 + This function is intended to be used by the EPOC layer and personality layers.
1.1637 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadResume().
1.1638 +
1.1639 + This function cancels all suspensions on a thread.
1.1640 +
1.1641 + @param aThread Thread to be resumed.
1.1642 + @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
1.1643 +
1.1644 +	@return TRUE if the thread's state changed from suspended to non-suspended;
1.1645 +			FALSE otherwise.
1.1646 +
1.1647 + @see Kern::ThreadResume()
1.1648 +*/
1.1649 +EXPORT_C TBool NKern::ThreadForceResume(NThread* aThread, NFastMutex* aMutex)
1.1650 + {
1.1651 + if (!aMutex)
1.1652 + aMutex=&TheScheduler.iLock;
1.1653 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NFRsm + FM %M",aThread,aMutex));
1.1654 + NKern::Lock();
1.1655 + TBool r=aThread->ForceResume();
1.1656 + aMutex->Signal();
1.1657 + NKern::Unlock();
1.1658 + return r;
1.1659 + }
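+
+// Usage sketch (illustrative only): resuming a thread and releasing a fast mutex in a
+// single call, as a personality layer might do when the suspension bookkeeping is
+// protected by that mutex. 'aWorker' and 'aLock' are hypothetical; the caller is
+// assumed to hold 'aLock' on entry.
+//
+//	void ExampleResumeUnderLock(NThread* aWorker, NFastMutex* aLock)
+//		{
+//		// ... update bookkeeping protected by aLock ...
+//		NKern::ThreadResume(aWorker, aLock);	// resume once and signal aLock atomically
+//		}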
1.1660 +
1.1661 +
1.1662 +/** Awakens a nanothread.
1.1663 +
1.1664 + This function is used to implement synchronisation primitives in the EPOC
1.1665 + kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not
1.1666 + intended to be used directly by device drivers.
1.1667 +
1.1668 + If the nanothread is waiting on a fast semaphore, waiting for a DFC, or is
1.1669 + blocked in a call to NKern::Block, it is awakened and put back on the ready
1.1670 + list. Otherwise, the thread state is unchanged. In particular, nothing
1.1671 + happens if the nanothread has been explicitly suspended.
1.1672 +
1.1673 + @param aThread Thread to release.
1.1674 + @param aReturnValue Value returned by NKern::Block if the thread was blocked.
1.1675 +
1.1676 + @see NKern::Block()
1.1677 +
1.1678 + @pre Interrupts must be enabled.
1.1679 + @pre Do not call from an ISR
1.1680 + */
1.1681 +EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue)
1.1682 + {
1.1683 + CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR,"NKern::ThreadRelease(NThread*, TInt)");
1.1684 + NKern::Lock();
1.1685 + aThread->Release(aReturnValue,0);
1.1686 + NKern::Unlock();
1.1687 + }
1.1688 +
1.1689 +
1.1690 +/** Atomically awakens a nanothread and signals a fast mutex.
1.1691 +
1.1692 + This function is used to implement synchronisation primitives in the EPOC
1.1693 + kernel (e.g. DMutex and DSemaphore) and in personality layers. It is not
1.1694 + intended to be used directly by device drivers.
1.1695 +
1.1696 + @param aThread Thread to release.
1.1697 + @param aReturnValue Value returned by NKern::Block if the thread was blocked.
1.1698 + @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
1.1699 +
1.1700 + @see NKern::ThreadRelease(NThread*, TInt)
1.1701 + @see NKern::Block()
1.1702 +
1.1703 + @pre Call in a thread context.
1.1704 + @pre Interrupts must be enabled.
1.1705 + @pre Kernel must be unlocked.
1.1706 + @pre Specified mutex must be held
1.1707 + */
1.1708 +EXPORT_C void NKern::ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex)
1.1709 + {
1.1710 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRelease(NThread*,TInt,NFastMutex*)");
1.1711 + if (!aMutex)
1.1712 + aMutex=&TheScheduler.iLock;
1.1713 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NRel ret %d + FM %M",aThread,aReturnValue,aMutex));
1.1714 + NKern::Lock();
1.1715 + aThread->Release(aReturnValue,0);
1.1716 + aMutex->Signal();
1.1717 + NKern::Unlock();
1.1718 + }
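+
+// Usage sketch (illustrative only): the signalling side of a wait object built on
+// NKern::Block()/NKern::ThreadRelease(). The caller holds 'aLock', which protects the
+// (hypothetical) wait queue from which 'aWaiter' has just been removed; the call
+// below wakes the waiter with KErrNone and signals 'aLock' atomically.
+//
+//	void ExampleWakeWaiter(NThread* aWaiter, NFastMutex* aLock)
+//		{
+//		NKern::ThreadRelease(aWaiter, KErrNone, aLock);
+//		}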
1.1719 +
1.1720 +
1.1721 +/** Changes the priority of a thread.
1.1722 +
1.1723 + This function is intended to be used by the EPOC layer and personality layers.
1.1724 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
1.1725 +
1.1726 + @param aThread Thread to receive the new priority.
1.1727 + @param aPriority New priority for aThread.
1.1728 +
1.1729 + @see Kern::SetThreadPriority()
1.1730 +*/
1.1731 +EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority)
1.1732 + {
1.1733 + NKern::Lock();
1.1734 + aThread->SetPriority(aPriority);
1.1735 + NKern::Unlock();
1.1736 + }
1.1737 +
1.1738 +
1.1739 +/** Changes the priority of a thread and signals a mutex.
1.1740 +
1.1741 + This function is intended to be used by the EPOC layer and personality layers.
1.1742 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadSetPriority().
1.1743 +
1.1744 + @param aThread Thread to receive the new priority.
1.1745 + @param aPriority New priority for aThread.
1.1746 + @param aMutex Mutex to be signalled. If NULL, the scheduler's mutex will be signalled.
1.1747 +
1.1748 + @see Kern::SetThreadPriority()
1.1749 +*/
1.1750 +EXPORT_C void NKern::ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex)
1.1751 + {
1.1752 + if (!aMutex)
1.1753 + aMutex=&TheScheduler.iLock;
1.1754 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NSPri->%d + FM %M",aThread,aPriority,aMutex));
1.1755 + NKern::Lock();
1.1756 + aThread->SetPriority(aPriority);
1.1757 + aMutex->Signal();
1.1758 + NKern::Unlock();
1.1759 + }
1.1760 +
1.1761 +
1.1762 +/** Atomically signals the request semaphore of a nanothread and a fast mutex.
1.1763 +
1.1764 + This function is intended to be used by the EPOC layer and personality
1.1765 + layers. Device drivers should use Kern::RequestComplete instead.
1.1766 +
1.1767 + @param aThread Nanothread to signal. Must be non NULL.
1.1768 + @param aMutex Fast mutex to signal. If NULL, the system lock is signaled.
1.1769 +
1.1770 + @see Kern::RequestComplete()
1.1771 +
1.1772 + @pre Call in a thread context.
1.1773 + @pre Interrupts must be enabled.
1.1774 + @pre Kernel must be unlocked.
1.1775 + @pre Specified mutex must be held
1.1776 + */
1.1777 +EXPORT_C void NKern::ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex)
1.1778 + {
1.1779 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadRequestSignal(NThread*,NFastMutex*)");
1.1780 + if (!aMutex)
1.1781 + aMutex = &TheScheduler.iLock;
1.1782 + NKern::Lock();
1.1783 + aThread->iRequestSemaphore.Signal();
1.1784 + aMutex->Signal();
1.1785 + NKern::Unlock();
1.1786 + }
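+
+// Usage sketch (illustrative only): completing an asynchronous request from a
+// personality layer while holding the system lock, then letting this call signal the
+// client's request semaphore and the lock atomically. 'aClient' is hypothetical.
+//
+//	void ExampleCompleteRequest(NThread* aClient)
+//		{
+//		NKern::LockSystem();
+//		// ... record the completion result for the client under the system lock ...
+//		NKern::ThreadRequestSignal(aClient, NULL);	// NULL means signal the system lock
+//		}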
1.1787 +
1.1788 +
1.1789 +/** Kills a nanothread.
1.1790 +
1.1791 + This function is intended to be used by the EPOC layer and personality layers.
1.1792 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
1.1793 +
1.1794 + This function does not return if the current thread is killed.
1.1795 + This function is asynchronous (i.e. the thread to kill may still be alive when the call returns).
1.1796 +
1.1797 + @param aThread Thread to kill. Must be non NULL.
1.1798 +
1.1799 + @pre If acting on calling thread, calling thread must not be in a
1.1800 + critical section
1.1801 + @pre Thread must not already be exiting.
1.1802 +
1.1803 + @see Kern::ThreadKill()
1.1804 + */
1.1805 +EXPORT_C void NKern::ThreadKill(NThread* aThread)
1.1806 + {
1.1807 + NKern::Lock();
1.1808 + aThread->Kill();
1.1809 + NKern::Unlock();
1.1810 + }
1.1811 +
1.1812 +
1.1813 +/** Atomically kills a nanothread and signals a fast mutex.
1.1814 +
1.1815 + This function is intended to be used by the EPOC layer and personality layers.
1.1816 + Do not use this function directly on a Symbian OS thread - use Kern::ThreadKill().
1.1817 +
1.1818 + @param aThread Thread to kill. Must be non NULL.
1.1819 + @param aMutex Fast mutex to signal. If NULL, the system lock is signalled.
1.1820 +
1.1821 + @pre If acting on calling thread, calling thread must not be in a
1.1822 + critical section
1.1823 + @pre Thread must not already be exiting.
1.1824 +
1.1825 + @see NKern::ThreadKill(NThread*)
1.1826 + */
1.1827 +EXPORT_C void NKern::ThreadKill(NThread* aThread, NFastMutex* aMutex)
1.1828 + {
1.1829 + if (!aMutex)
1.1830 + aMutex = &TheScheduler.iLock;
1.1831 + NThreadBase* pC = NKern::LockC();
1.1832 + if (aThread==pC)
1.1833 + {
1.1834 + __NK_ASSERT_DEBUG(pC->iCsCount==0); // Make sure thread isn't in critical section
1.1835 + __NK_ASSERT_ALWAYS(aMutex->HeldByCurrentThread());
1.1836 + pC->AcqSLock();
1.1837 + aThread->iCsFunction = NThreadBase::ECSExitPending;
1.1838 + pC->RelSLock();
1.1839 + aMutex->iHoldingThread = (NThreadBase*)(TLinAddr(aThread) | 1);
1.1840 + aMutex->Signal(); // this will make us exit
1.1841 + FAULT(); // should never get here
1.1842 + }
1.1843 + else
1.1844 + {
1.1845 + aThread->Kill();
1.1846 + aMutex->Signal();
1.1847 + }
1.1848 + NKern::Unlock();
1.1849 + }
1.1850 +
1.1851 +
1.1852 +/** Enters thread critical section.
1.1853 +
1.1854 + This function can safely be used in device drivers.
1.1855 +
1.1856 + The current thread will enter its critical section. While in critical section
1.1857 + the thread cannot be suspended or killed. Any suspension or kill will be deferred
1.1858 + until the thread leaves the critical section.
1.1859 +	Some APIs explicitly require the calling thread to be in a critical section
1.1860 +	before they are called.
1.1861 +	Only User threads need to call this function, as the concept of a thread critical
1.1862 +	section applies to User threads only.
1.1863 +
1.1864 + @pre Call in a thread context.
1.1865 + @pre Kernel must be unlocked.
1.1866 +*/
1.1867 +EXPORT_C void NKern::ThreadEnterCS()
1.1868 + {
1.1869 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadEnterCS");
1.1870 + NThreadBase* pC = NKern::CurrentThread();
1.1871 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NEntCS",pC));
1.1872 + __NK_ASSERT_DEBUG(pC->iCsCount>=0);
1.1873 + ++pC->iCsCount;
1.1874 + }
1.1875 +
1.1876 +NThread* NKern::_ThreadEnterCS()
1.1877 + {
1.1878 + NThreadBase* pC = NKern::CurrentThread();
1.1879 + __NK_ASSERT_DEBUG(pC->iCsCount>=0);
1.1880 + ++pC->iCsCount;
1.1881 + return (NThread*)pC;
1.1882 + }
1.1883 +
1.1884 +
1.1885 +/** Leaves thread critical section.
1.1886 +
1.1887 + This function can safely be used in device drivers.
1.1888 +
1.1889 +	The current thread will leave its critical section. If a suspension or kill was
1.1890 +	deferred while the thread was in its critical section, it takes effect as soon as
1.1891 +	the thread leaves the critical section by calling this function.
1.1892 + Only User threads need to call this function as the concept of thread critical
1.1893 + section applies to User threads only.
1.1894 +
1.1895 + @pre Call in a thread context.
1.1896 + @pre Kernel must be unlocked.
1.1897 +*/
1.1898 +EXPORT_C void NKern::ThreadLeaveCS()
1.1899 + {
1.1900 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadLeaveCS");
1.1901 + NThreadBase* pC = NKern::LockC();
1.1902 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NLvCS",pC));
1.1903 + pC->AcqSLock();
1.1904 + __NK_ASSERT_DEBUG(pC->iCsCount>0);
1.1905 + if (--pC->iCsCount==0 && pC->iCsFunction!=0)
1.1906 + {
1.1907 + NFastMutex* m = HeldFastMutex();
1.1908 + if (m)
1.1909 + m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
1.1910 + else
1.1911 + pC->DoCsFunctionT();
1.1912 + }
1.1913 + pC->RelSLock();
1.1914 + NKern::Unlock();
1.1915 + }
1.1916 +
1.1917 +void NKern::_ThreadLeaveCS()
1.1918 + {
1.1919 + NThreadBase* pC = NKern::LockC();
1.1920 + pC->AcqSLock();
1.1921 + __NK_ASSERT_DEBUG(pC->iCsCount>0);
1.1922 + if (--pC->iCsCount==0 && pC->iCsFunction!=0)
1.1923 + {
1.1924 + NFastMutex* m = HeldFastMutex();
1.1925 + if (m)
1.1926 + m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
1.1927 + else
1.1928 + pC->DoCsFunctionT();
1.1929 + }
1.1930 + pC->RelSLock();
1.1931 + NKern::Unlock();
1.1932 + }
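+
+// Usage sketch (illustrative only): the usual driver pattern of bracketing work that
+// must not be cut short by a suspend or kill - for instance while a temporary
+// resource is owned - with a thread critical section.
+//
+//	void ExampleCriticalWork()
+//		{
+//		NKern::ThreadEnterCS();		// suspend/kill now deferred
+//		// ... allocate, use and release a resource that must not leak ...
+//		NKern::ThreadLeaveCS();		// any deferred suspend/kill takes effect here
+//		}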
1.1933 +
1.1934 +/** Freeze the CPU of the current thread
1.1935 +
1.1936 + After this the current thread will not migrate to another processor
1.1937 +
1.1938 + @return A cookie to be passed to NKern::EndFreezeCpu() to allow nesting
1.1939 +*/
1.1940 +EXPORT_C TInt NKern::FreezeCpu()
1.1941 + {
1.1942 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::FreezeCpu");
1.1943 + NKern::Lock();
1.1944 + TSubScheduler& ss = SubScheduler();
1.1945 + NThreadBase* pC = ss.iCurrentThread;
1.1946 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NFrzCpu",pC));
1.1947 + if (pC->iFreezeCpu)
1.1948 + {
1.1949 + NKern::Unlock();
1.1950 + return 1;
1.1951 + }
1.1952 + pC->iFreezeCpu = 1;
1.1953 + if (pC->iParent != pC)
1.1954 + {
1.1955 + pC->AcqSLock();
1.1956 + ++pC->iParent->iFreezeCpu;
1.1957 + pC->RelSLock();
1.1958 + }
1.1959 + NKern::Unlock();
1.1960 + return 0;
1.1961 + }
1.1962 +
1.1963 +
1.1964 +/** Unfreeze the current thread's CPU
1.1965 +
1.1966 + After this the current thread will again be eligible to migrate to another processor
1.1967 +
1.1968 + @param aCookie the value returned by NKern::FreezeCpu()
1.1969 +*/
1.1970 +EXPORT_C void NKern::EndFreezeCpu(TInt aCookie)
1.1971 + {
1.1972 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::EndFreezeCpu");
1.1973 + __KTRACE_OPT(KNKERN,DEBUGPRINT("%T NEndFrz %d",NKern::CurrentThread(),aCookie));
1.1974 + if (aCookie)
1.1975 + return;
1.1976 + NKern::Lock();
1.1977 + TSubScheduler& ss = SubScheduler();
1.1978 + NThreadBase* pC = ss.iCurrentThread;
1.1979 + if (pC->iFreezeCpu)
1.1980 + {
1.1981 + pC->iFreezeCpu = 0;
1.1982 + mb();
1.1983 + if (pC->iParent != pC)
1.1984 + {
1.1985 + pC->AcqSLock();
1.1986 + if (!--pC->iParent->iFreezeCpu && pC->iParent->iCpuChange)
1.1987 + RescheduleNeeded();
1.1988 + pC->RelSLock();
1.1989 + }
1.1990 + else if (pC->iCpuChange) // deferred CPU change?
1.1991 + RescheduleNeeded();
1.1992 + }
1.1993 + NKern::Unlock();
1.1994 + }
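+
+// Usage sketch (illustrative only): pinning the current thread to its CPU around a
+// per-CPU operation. The cookie returned by NKern::FreezeCpu() makes nested
+// freeze/unfreeze pairs safe, since only the outermost pair changes any state.
+//
+//	void ExamplePerCpuWork()
+//		{
+//		TInt cookie = NKern::FreezeCpu();	// no migration from here on
+//		// ... touch per-CPU data that must stay on this processor ...
+//		NKern::EndFreezeCpu(cookie);		// inner calls (cookie != 0) are no-ops
+//		}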
1.1995 +
1.1996 +
1.1997 +/** Changes the CPU affinity of a thread.
1.1998 +
1.1999 +	@pre	Call in a thread context.
1.2000 +	@param	aThread    The thread whose CPU affinity is to be changed.
1.2001 +	@param	aAffinity  The new CPU affinity mask.
1.2002 +	@return The old affinity mask.
1.2003 + */
1.2004 +EXPORT_C TUint32 NKern::ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity)
1.2005 + {
1.2006 + NKern::Lock();
1.2007 + TUint32 r = aThread->SetCpuAffinity(aAffinity);
1.2008 + NKern::Unlock();
1.2009 + return r;
1.2010 + }
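+
+// Usage sketch (illustrative only): the save-and-restore pattern around a temporary
+// affinity change. 'aThread' and 'aTempAffinity' are hypothetical; the affinity value
+// is encoded as accepted by the aAffinity parameter above.
+//
+//	void ExampleTemporaryAffinity(NThread* aThread, TUint32 aTempAffinity)
+//		{
+//		TUint32 old = NKern::ThreadSetCpuAffinity(aThread, aTempAffinity);
+//		// ... work that relies on the temporary affinity ...
+//		NKern::ThreadSetCpuAffinity(aThread, old);	// restore the previous affinity
+//		}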
1.2011 +
1.2012 +
1.2013 +/** Modifies a thread's timeslice.
1.2014 +
1.2015 +	@pre	Call in a thread context.
1.2016 +	@param	aThread    The thread whose timeslice is to be changed.
1.2017 +	@param	aTimeslice The new timeslice value.
1.2018 + */
1.2019 +EXPORT_C void NKern::ThreadSetTimeslice(NThread* aThread, TInt aTimeslice)
1.2020 + {
1.2021 + NKern::Lock();
1.2022 + aThread->AcqSLock();
1.2023 + if (aThread->iTimeslice == aThread->iTime || aTimeslice<0)
1.2024 + aThread->iTime = aTimeslice;
1.2025 + aThread->iTimeslice = aTimeslice;
1.2026 + aThread->RelSLock();
1.2027 + NKern::Unlock();
1.2028 + }
1.2029 +
1.2030 +
1.2031 +/** Blocks current nanothread.
1.2032 +
1.2033 + This function is used to implement synchronisation primitives in the EPOC
1.2034 + layer and in personality layers. It is not intended to be used directly by
1.2035 + device drivers.
1.2036 +
1.2037 + @param aTimeout If greater than 0, the nanothread will be blocked for at most
1.2038 + aTimeout microseconds.
1.2039 + @param aMode Bitmask whose possible values are documented in TBlockMode.
1.2040 + @param aMutex Fast mutex to operate on. If NULL, the system lock is used.
1.2041 +
1.2042 + @see NKern::ThreadRelease()
1.2043 + @see TBlockMode
1.2044 +
1.2045 + @pre Call in a thread context.
1.2046 + @pre Interrupts must be enabled.
1.2047 + @pre Kernel must be unlocked.
1.2048 + @pre Specified mutex must be held
1.2049 + */
1.2050 +EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex)
1.2051 + {
1.2052 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Block(TUint32,TUint,NFastMutex*)");
1.2053 + if (!aMutex)
1.2054 + aMutex = &TheScheduler.iLock;
1.2055 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d FM %M",aTimeout,aMode,aMutex));
1.2056 + if (aMode & EEnterCS)
1.2057 + NKern::_ThreadEnterCS(); // NOTE: MUST DO THIS BEFORE CALLING NKern::Lock()
1.2058 + NThreadBase* pC = NKern::LockC();
1.2059 + TUint flags = (aMode & NKern::EObstruct) ? NThreadWaitState::EWtStObstructed : 0;
1.2060 + pC->iWaitState.SetUpWait(NThreadBase::EWaitBlocked, flags, 0, aTimeout);
1.2061 + if (aMode & ERelease)
1.2062 + aMutex->Signal();
1.2063 + RescheduleNeeded();
1.2064 + NKern::Unlock(); // thread blocks here
1.2065 + TInt r = pC->iWaitState.iWtC.iRetVal; // sample here since it will be overwritten if we block on the fast mutex
1.2066 + if (aMode & EClaim)
1.2067 + FMWait(aMutex);
1.2068 + return r;
1.2069 + }
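+
+// Usage sketch (illustrative only): waiting for a condition protected by a fast
+// mutex. The caller holds 'aLock'; ERelease drops it before blocking and EClaim
+// re-acquires it on wake-up, so the condition is always re-tested under the lock.
+// 'ConditionHolds()' and the waker (which would call NKern::ThreadRelease()) are
+// hypothetical.
+//
+//	TInt ExampleWaitFor(NFastMutex* aLock)
+//		{
+//		TInt r = KErrNone;
+//		while (r == KErrNone && !ConditionHolds())
+//			r = NKern::Block(0, NKern::ERelease | NKern::EClaim, aLock);	// 0 = no timeout
+//		return r;	// KErrNone, or an error such as KErrTimedOut from the release code
+//		}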
1.2070 +
1.2071 +
1.2072 +/**
1.2073 +@pre Call in a thread context.
1.2074 +@pre Interrupts must be enabled.
1.2075 +@pre Kernel must be unlocked.
1.2076 +@pre No fast mutex can be held
1.2077 +*/
1.2078 +/** @see NKern::Block(TUint32, TUint, NFastMutex*) */
1.2079 +EXPORT_C TInt NKern::Block(TUint32 aTimeout, TUint aMode)
1.2080 + {
1.2081 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Block(TUint32,TUint)");
1.2082 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::Block time %d mode %d",aTimeout,aMode));
1.2083 + if (aMode & EEnterCS)
1.2084 + NKern::_ThreadEnterCS(); // NOTE: MUST DO THIS BEFORE CALLING NKern::Lock()
1.2085 + NThreadBase* pC = NKern::LockC();
1.2086 + TUint flags = (aMode & NKern::EObstruct) ? NThreadWaitState::EWtStObstructed : 0;
1.2087 + pC->iWaitState.SetUpWait(NThreadBase::EWaitBlocked, flags, 0, aTimeout);
1.2088 + RescheduleNeeded();
1.2089 + NKern::Unlock(); // thread blocks here
1.2090 + return pC->iWaitState.iWtC.iRetVal;
1.2091 + }
1.2092 +
1.2093 +
1.2094 +
1.2095 +
1.2096 +/**
1.2097 +Places the current nanothread into a wait state on an externally
1.2098 +defined wait object.
1.2099 +
1.2100 +For use by RTOS personality layers.
1.2101 +Do not use this function directly on a Symbian OS thread.
1.2102 +
1.2103 +Since the kernel is locked on entry, any reschedule will be deferred until
1.2104 +it is unlocked. The thread should be added to any necessary wait queue after
1.2105 +a call to this function, since this function removes it from the ready list.
1.2106 +The thread's wait timer is started if aTimeout is nonzero.
1.2107 +The thread's NState and wait object are updated.
1.2108 +
1.2109 +Call NThreadBase::Release() when the wait condition is resolved.
1.2110 +
1.2111 +@param aTimeout The maximum time for which the thread should block, in nanokernel timer ticks.
1.2112 + A zero value means wait forever.
1.2113 + If the thread is still blocked when the timeout expires,
1.2114 + then the timeout state handler will be called.
1.2115 +@param aState The nanokernel thread state (N-State) value to be set.
1.2116 + This state corresponds to the externally defined wait object.
1.2117 + This value will be written into the member NThreadBase::iNState.
1.2118 +@param aWaitObj A pointer to an externally defined wait object.
1.2119 + This value will be written into the member NThreadBase::iWaitObj.
1.2120 +
1.2121 +@pre Kernel must be locked.
1.2122 +@pre Call in a thread context.
1.2123 +
1.2124 +@post Kernel is locked.
1.2125 +
1.2126 +@see NThreadBase::Release()
1.2127 +*/
1.2128 +EXPORT_C void NKern::NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj)
1.2129 + {
1.2130 + CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::NanoBlock");
1.2131 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NanoBlock time %d state %d obj %08x", aTimeout, aState, aWaitObj));
1.2132 + NThreadBase* pC = NCurrentThreadL();
1.2133 + pC->iWaitState.SetUpWait(aState, aState>>8, aWaitObj, aTimeout);
1.2134 + RescheduleNeeded();
1.2135 + }
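+
+// Usage sketch (illustrative only): how an RTOS personality layer might implement a
+// wait on its own semaphore type using NKern::NanoBlock(). 'SFooSem', its wait queue
+// and the N-state value 'EWaitFooSem' are hypothetical.
+//
+//	void ExampleSemWait(SFooSem* aSem, TUint32 aTimeout)
+//		{
+//		NKern::Lock();
+//		if (--aSem->iCount < 0)
+//			{
+//			NKern::NanoBlock(aTimeout, EWaitFooSem, aSem);	// removes the thread from the ready list
+//			// ... add the current thread to aSem's wait queue here ...
+//			}
+//		NKern::Unlock();	// the deferred reschedule (and hence the block) happens here
+//		}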
1.2136 +
1.2137 +
1.2138 +
1.2139 +
1.2140 +EXPORT_C void NKern::Sleep(TUint32 aTime)
1.2141 +/**
1.2142 +Puts the current nanothread to sleep for the specified duration.
1.2143 +
1.2144 +It can be called from Symbian OS threads.
1.2145 +
1.2146 +@param aTime sleep time in nanokernel timer ticks.
1.2147 +
1.2148 +@pre No fast mutex can be held.
1.2149 +@pre Kernel must be unlocked.
1.2150 +@pre Call in a thread context.
1.2151 +@pre Interrupts must be enabled.
1.2152 +*/
1.2153 + {
1.2154 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::Sleep");
1.2155 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NSlp %d",aTime));
1.2156 + NThreadBase* pC = NKern::LockC();
1.2157 + pC->iWaitState.SetUpWait(NThreadBase::EWaitSleep, 0, 0, aTime);
1.2158 + RescheduleNeeded();
1.2159 + NKern::Unlock();
1.2160 + }
1.2161 +
1.2162 +
1.2163 +/** Terminates the current nanothread.
1.2164 +
1.2165 + Calls to this function never return.
1.2166 +
1.2167 + For use by RTOS personality layers.
1.2168 + Do not use this function directly on a Symbian OS thread.
1.2169 +
1.2170 + @pre Call in a thread context.
1.2171 + @pre Interrupts must be enabled.
1.2172 + @pre Kernel must be unlocked.
1.2173 + */
1.2174 +EXPORT_C void NKern::Exit()
1.2175 + {
1.2176 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::Exit");
1.2177 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NExit"));
1.2178 + NKern::LockC()->Exit(); // this won't return
1.2179 + FAULT();
1.2180 + }
1.2181 +
1.2182 +
1.2183 +/** Terminates the current nanothread at the next possible point.
1.2184 +
1.2185 + If the calling thread is not currently in a critical section and does not
1.2186 + currently hold a fast mutex, it exits immediately and this function does
1.2187 + not return. On the other hand if the thread is in a critical section or
1.2188 + holds a fast mutex the thread continues executing but it will exit as soon
1.2189 + as it leaves the critical section and/or releases the fast mutex.
1.2190 +
1.2191 + @pre Call in a thread context.
1.2192 + @pre Interrupts must be enabled.
1.2193 + @pre Kernel must be unlocked.
1.2194 + */
1.2195 +EXPORT_C void NKern::DeferredExit()
1.2196 + {
1.2197 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::DeferredExit");
1.2198 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NDefExit"));
1.2199 + NFastMutex* m = HeldFastMutex();
1.2200 + NThreadBase* pC = NKern::LockC();
1.2201 + if (!m && !pC->iCsCount)
1.2202 + pC->Exit(); // this won't return
1.2203 + pC->AcqSLock();
1.2204 + if (pC->iCsFunction >= 0) // don't touch it if we are already exiting
1.2205 + pC->iCsFunction = NThreadBase::ECSExitPending;
1.2206 + pC->RelSLock();
1.2207 + if (m && !pC->iCsCount)
1.2208 + m->iHoldingThread = (NThreadBase*)(TLinAddr(pC) | 1);
1.2209 + NKern::Unlock();
1.2210 + }
1.2211 +
1.2212 +
1.2213 +/** Prematurely terminates the current thread's timeslice
1.2214 +
1.2215 + @pre Kernel must be unlocked.
1.2216 + @pre Call in a thread context.
1.2217 +
1.2218 + @post Kernel is unlocked.
1.2219 + */
1.2220 +EXPORT_C void NKern::YieldTimeslice()
1.2221 + {
1.2222 + CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::YieldTimeslice");
1.2223 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::YieldTimeslice"));
1.2224 + NThreadBase* t = NKern::LockC();
1.2225 + t->iTime = 0;
1.2226 + mb();
1.2227 + if (t->iNext!=t || t->iParent->iNext!=t->iParent)
1.2228 + RescheduleNeeded();
1.2229 + NKern::Unlock();
1.2230 + }
1.2231 +
1.2232 +
1.2233 +/** Returns the number of CPUs available to Symbian OS
1.2234 +
1.2235 + @return the number of CPUs
1.2236 +
1.2237 + @pre Call in any context.
1.2238 +*/
1.2239 +EXPORT_C TInt NKern::NumberOfCpus()
1.2240 + {
1.2241 + return TheScheduler.iNumCpus;
1.2242 + }
1.2243 +
1.2244 +
1.2245 +/** Rotates the specified CPU ready list for threads at the specified priority.
1.2246 +
1.2247 + For use by RTOS personality layers to allow external control of round-robin
1.2248 + scheduling. Not intended for direct use by device drivers.
1.2249 +
1.2250 +	@param aPriority Priority at which threads should be rotated;
1.2251 +						-1 means use the calling thread's priority.
1.2252 +	@param aCpu CPU to act on.
1.2253 +
1.2254 + @pre Kernel must be unlocked.
1.2255 + @pre Call in a thread context.
1.2256 +
1.2257 + @post Kernel is unlocked.
1.2258 + */
1.2259 +
1.2260 +EXPORT_C void NKern::RotateReadyList(TInt aPriority, TInt aCpu)
1.2261 + {
1.2262 +// CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::RotateReadyList");
1.2263 +// __KTRACE_OPT(KNKERN,DEBUGPRINT("NKern::RotateReadyList %d",aPriority));
1.2264 +// if (aPriority<0 || aPriority>=KNumPriorities)
1.2265 +// aPriority=NKern::CurrentThread()->iPriority;
1.2266 +// NKern::Lock();
1.2267 +// TheScheduler.RotateReadyList(aPriority);
1.2268 +// NKern::Unlock();
1.2269 + }
1.2270 +
1.2271 +
1.2272 +/** Rotates the ready list for threads at the specified priority.
1.2273 +
1.2274 + For use by RTOS personality layers to allow external control of round-robin
1.2275 + scheduling. Not intended for direct use by device drivers.
1.2276 +
1.2277 +	@param aPriority Priority at which threads should be rotated;
1.2278 +						-1 means use the calling thread's priority.
1.2279 +
1.2280 + @pre Kernel must be unlocked.
1.2281 + @pre Call in a thread context.
1.2282 +
1.2283 + @post Kernel is unlocked.
1.2284 + */
1.2285 +EXPORT_C void NKern::RotateReadyList(TInt aPriority)
1.2286 + {
1.2287 + RotateReadyList(aPriority, -1);
1.2288 + }
1.2289 +
1.2290 +
1.2291 +/** Returns a pointer to the thread group to which the current thread belongs,
1.2292 +	if any. Returns NULL if the current thread is a standalone thread.
1.2293 +
1.2294 + @pre Call in a thread context.
1.2295 + */
1.2296 +EXPORT_C NThreadGroup* NKern::CurrentGroup()
1.2297 + {
1.2298 + NThreadBase* pC = NKern::CurrentThread();
1.2299 + return (pC->iParent == pC) ? (NThreadGroup*)0 : (NThreadGroup*)pC->iParent;
1.2300 + }
1.2301 +
1.2302 +
1.2303 +/** Detaches the current thread from the group to which it currently belongs,
1.2304 + if any. Returns a pointer to the group (NULL if none).
1.2305 +
1.2306 + @pre Call in a thread context.
1.2307 + @pre Interrupts enabled
1.2308 + @pre Kernel unlocked
1.2309 + */
1.2310 +EXPORT_C NThreadGroup* NKern::LeaveGroup()
1.2311 + {
1.2312 + CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_NOT_IDFC|MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED, "NKern::LeaveGroup");
1.2313 + NKern::Lock();
1.2314 + TSubScheduler& ss = SubScheduler();
1.2315 + NThreadBase* pC = ss.iCurrentThread;
1.2316 + pC->iNewParent = 0; // cancel any pending Join
1.2317 + NThreadGroup* g = (pC->iParent == pC) ? (NThreadGroup*)0 : (NThreadGroup*)pC->iParent;
1.2318 + TBool make_group_ready = FALSE;
1.2319 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NLeaveGroup %T (%G)",pC,g));
1.2320 + if (g)
1.2321 + {
1.2322 + while (!pC->TiedEventLeaveInterlock())
1.2323 + {
1.2324 + TInt irq = NKern::DisableAllInterrupts();
1.2325 + ss.QueueDfcs();
1.2326 + NKern::RestoreInterrupts(irq);
1.2327 + }
1.2328 + pC->AcqSLock();
1.2329 + ss.iReadyListLock.LockOnly();
1.2330 + pC->UnReadyT();
1.2331 + pC->iParent = pC;
1.2332 + g->iCurrent = 0; // since current thread is no longer in g
1.2333 + ss.AddHead(pC);
1.2334 + pC->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
1.2335 + pC->iCpuAffinity = g->iCpuAffinity; // keep same CPU affinity
1.2336 + // if we're frozen, the group's freeze count was incremented
1.2337 + if (pC->iFreezeCpu)
1.2338 + --g->iFreezeCpu;
1.2339 + // if we've been marked as deferring, the group's freeze count was incremented
1.2340 + if (pC->iFastMutexDefer == 1)
1.2341 + {
1.2342 + --g->iFreezeCpu;
1.2343 + pC->iFastMutexDefer = 0;
1.2344 + }
1.2345 + // if the group was waiting to change cpu then this thread needs to change still
1.2346 + if (g->iCpuChange)
1.2347 + {
1.2348 + pC->iCpuChange = g->iCpuChange;
1.2349 + RescheduleNeeded();
1.2350 + if (!g->iFreezeCpu)
1.2351 + {
1.2352 + // we were the last thread in the group stopping it from moving
1.2353 + // but there may be no other threads left after UnReadyT'ing this one
1.2354 + g->iCpuChange = FALSE;
1.2355 + if (g->iReady)
1.2356 + {
1.2357 + ss.Remove(g);
1.2358 + g->iReady = 0;
1.2359 + make_group_ready = TRUE;
1.2360 + }
1.2361 + }
1.2362 + }
1.2363 + ss.iReadyListLock.UnlockOnly();
1.2364 + --g->iThreadCount;
1.2365 + if (make_group_ready)
1.2366 + g->ReadyT(0);
1.2367 + g->RelSLock(); // since pC is no longer attached to g
1.2368 + pC->RelSLock();
1.2369 + }
1.2370 + NKern::Unlock();
1.2371 + return g;
1.2372 + }
1.2373 +
1.2374 +
1.2375 +/** Adds the current thread to the specified group.
1.2376 +
1.2377 +	@param aGroup Pointer to the group to join.
1.2378 +
1.2379 + @pre Call in a thread context, not in one of the idle threads.
1.2380 + @pre Interrupts enabled
1.2381 + @pre Kernel unlocked
1.2382 + @pre Thread does not hold a fast mutex
1.2383 + @pre Thread does not have a freeze on CPU migration
1.2384 + @pre Current thread is not already in a group
1.2385 + */
1.2386 +EXPORT_C void NKern::JoinGroup(NThreadGroup* aGroup)
1.2387 + {
1.2388 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD, "NKern::JoinGroup");
1.2389 + NKern::Lock();
1.2390 + TSubScheduler& ss = SubScheduler();
1.2391 + NThreadBase* pC = ss.iCurrentThread;
1.2392 + __ASSERT_WITH_MESSAGE_DEBUG(pC->iParent==pC, "Thread not already in a group", "NKern::JoinGroup");
1.2393 + __ASSERT_WITH_MESSAGE_DEBUG(!pC->iFreezeCpu, "No interdiction on CPU migration", "NKern::JoinGroup");
1.2394 + __ASSERT_WITH_MESSAGE_DEBUG(!pC->i_NThread_Initial, "Not idle thread", "NKern::JoinGroup");
1.2395 + __NK_ASSERT_ALWAYS(pC->iParent==pC && !pC->iFreezeCpu);
1.2396 + __KTRACE_OPT(KNKERN,DEBUGPRINT("NJoinGroup %T->%G",pC,aGroup));
1.2397 + pC->AcqSLock();
1.2398 + aGroup->AcqSLock();
1.2399 + TBool migrate = !CheckCpuAgainstAffinity(ss.iCpuNum, aGroup->iCpuAffinity); // TRUE if thread's current CPU is incompatible with the group's affinity
1.2400 + if (!aGroup->iReady || aGroup->iReady==pC->iReady)
1.2401 + {
1.2402 + // group not ready or ready on this CPU
1.2403 + if (!migrate)
1.2404 + {
1.2405 + ss.iReadyListLock.LockOnly();
1.2406 + pC->UnReadyT();
1.2407 + pC->iParent = aGroup;
1.2408 + aGroup->iNThreadList.AddHead(pC);
1.2409 + if (!aGroup->iReady)
1.2410 + {
1.2411 + aGroup->iPriority = pC->iPriority;
1.2412 + ss.AddHead(aGroup);
1.2413 + aGroup->iReady = TUint8(ss.iCpuNum | NSchedulable::EReadyOffset);
1.2414 + }
1.2415 + else if (pC->iPriority > aGroup->iPriority)
1.2416 + {
1.2417 + ss.ChangePriority(aGroup, pC->iPriority);
1.2418 + }
1.2419 + pC->iReady = NSchedulable::EReadyGroup;
1.2420 + aGroup->iCurrent = aGroup->iReady;
1.2421 + ss.iReadyListLock.UnlockOnly();
1.2422 + ++aGroup->iThreadCount;
1.2423 + goto done;
1.2424 + }
1.2425 + }
1.2426 + // this thread needs to migrate to another CPU
1.2427 + pC->iNewParent = aGroup;
1.2428 + RescheduleNeeded();
1.2429 +
1.2430 + // the following reschedule definitely joins the group even if the
1.2431 + // thread's CPU affinity is incompatible with that of the group
1.2432 + // (the thread's CPU affinity is subsequently determined by that of
1.2433 + // the group)
1.2434 +
1.2435 +done:
1.2436 + if (pC->iParent != aGroup)
1.2437 + aGroup->RelSLock();
1.2438 + pC->RelSLock();
1.2439 + while (!pC->TiedEventJoinInterlock())
1.2440 + {
1.2441 + TInt irq = NKern::DisableAllInterrupts();
1.2442 + ss.QueueDfcs();
1.2443 + NKern::RestoreInterrupts(irq);
1.2444 + }
1.2445 + NKern::Unlock();
1.2446 + }
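+
+// Usage sketch (illustrative only): a thread that normally runs inside a thread group
+// detaching from the group around work that does not need to be serialised with the
+// other members, then re-attaching afterwards.
+//
+//	void ExampleRunOutsideGroup()
+//		{
+//		NThreadGroup* g = NKern::LeaveGroup();	// NULL if we were not in a group
+//		// ... work that may run in parallel with the rest of the group ...
+//		if (g)
+//			NKern::JoinGroup(g);		// re-attach to the original group
+//		}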
1.2447 +
1.2448 +
1.2449 +/******************************************************************************
1.2450 + * Priority Lists
1.2451 + ******************************************************************************/
1.2452 +
1.2453 +#ifndef __PRI_LIST_MACHINE_CODED__
1.2454 +/** Returns the priority of the highest priority item present on a priority list.
1.2455 +
1.2456 + @return The highest priority present or -1 if the list is empty.
1.2457 + */
1.2458 +EXPORT_C TInt TPriListBase::HighestPriority()
1.2459 + {
1.2460 +// TUint64 present = MAKE_TUINT64(iPresent[1], iPresent[0]);
1.2461 +// return __e32_find_ms1_64(present);
1.2462 + return __e32_find_ms1_64(iPresent64);
1.2463 + }
1.2464 +
1.2465 +
1.2466 +/** Finds the highest priority item present on a priority list.
1.2467 +
1.2468 + If multiple items at the same priority are present, return the first to be
1.2469 + added in chronological order.
1.2470 +
1.2471 + @return A pointer to the item or NULL if the list is empty.
1.2472 + */
1.2473 +EXPORT_C TPriListLink* TPriListBase::First()
1.2474 + {
1.2475 + TInt p = HighestPriority();
1.2476 + return p >=0 ? static_cast<TPriListLink*>(iQueue[p]) : NULL;
1.2477 + }
1.2478 +
1.2479 +
1.2480 +/** Adds an item to a priority list at the tail of the queue for its priority.
1.2481 +
1.2482 + @param aLink A pointer to the item - must not be NULL.
1.2483 + */
1.2484 +EXPORT_C void TPriListBase::Add(TPriListLink* aLink)
1.2485 + {
1.2486 + TInt p = aLink->iPriority;
1.2487 + SDblQueLink* head = iQueue[p];
1.2488 + if (head)
1.2489 + {
1.2490 + // already some at this priority
1.2491 + aLink->InsertBefore(head);
1.2492 + }
1.2493 + else
1.2494 + {
1.2495 + // 'create' new list
1.2496 + iQueue[p] = aLink;
1.2497 + aLink->iNext = aLink->iPrev = aLink;
1.2498 + iPresent[p>>5] |= 1u << (p & 0x1f);
1.2499 + }
1.2500 + }
1.2501 +
1.2502 +
1.2503 +/** Removes an item from a priority list.
1.2504 +
1.2505 + @param aLink A pointer to the item - must not be NULL.
1.2506 + */
1.2507 +EXPORT_C void TPriListBase::Remove(TPriListLink* aLink)
1.2508 + {
1.2509 + if (!aLink->Alone())
1.2510 + {
1.2511 + // not the last on this list
1.2512 + TInt p = aLink->iPriority;
1.2513 + if (iQueue[p] == aLink)
1.2514 + iQueue[p] = aLink->iNext;
1.2515 + aLink->Deque();
1.2516 + }
1.2517 + else
1.2518 + {
1.2519 + TInt p = aLink->iPriority;
1.2520 + iQueue[p] = 0;
1.2521 + iPresent[p>>5] &= ~(1u << (p & 0x1f));
1.2522 + KILL_LINK(aLink);
1.2523 + }
1.2524 + }
1.2525 +
1.2526 +
1.2527 +/** Changes the priority of an item on a priority list.
1.2528 +
1.2529 + @param aLink A pointer to the item to act on - must not be NULL.
1.2530 + @param aNewPriority A new priority for the item.
1.2531 + */
1.2532 +EXPORT_C void TPriListBase::ChangePriority(TPriListLink* aLink, TInt aNewPriority)
1.2533 + {
1.2534 + if (aLink->iPriority!=aNewPriority)
1.2535 + {
1.2536 + Remove(aLink);
1.2537 + aLink->iPriority=TUint8(aNewPriority);
1.2538 + Add(aLink);
1.2539 + }
1.2540 + }
1.2541 +#endif
1.2542 +
1.2543 +/** Adds an item to a priority list at the head of the queue for its priority.
1.2544 +
1.2545 + @param aLink A pointer to the item - must not be NULL.
1.2546 + */
1.2547 +EXPORT_C void TPriListBase::AddHead(TPriListLink* aLink)
1.2548 + {
1.2549 + TInt p = aLink->iPriority;
1.2550 + SDblQueLink* head = iQueue[p];
1.2551 + iQueue[p] = aLink;
1.2552 + if (head)
1.2553 + {
1.2554 + // already some at this priority
1.2555 + aLink->InsertBefore(head);
1.2556 + }
1.2557 + else
1.2558 + {
1.2559 + // 'create' new list
1.2560 + aLink->iNext = aLink->iPrev = aLink;
1.2561 + iPresent[p>>5] |= 1u << (p & 0x1f);
1.2562 + }
1.2563 + }
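+
+// Usage sketch (illustrative only): typical use of the priority list primitives above
+// by scheduler-like code. The list, the link embedded in the queued object and the
+// new priority are all hypothetical parameters here.
+//
+//	void ExamplePriListUse(TPriListBase& aList, TPriListLink& aLink, TInt aNewPri)
+//		{
+//		aList.Add(&aLink);			// queue at the tail for aLink.iPriority
+//		aList.ChangePriority(&aLink, aNewPri);	// requeue at a different priority
+//		TInt hp = aList.HighestPriority();	// -1 if the list is empty
+//		TPriListLink* first = aList.First();	// first entry at the highest priority
+//		aList.Remove(&aLink);
+//		(void)hp; (void)first;
+//		}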
1.2564 +
1.2565 +
1.2566 +/******************************************************************************
1.2567 + * Generic IPIs
1.2568 + ******************************************************************************/
1.2569 +
1.2570 +TGenIPIList::TGenIPIList()
1.2571 + : iGenIPILock(TSpinLock::EOrderGenericIPIList)
1.2572 + {
1.2573 + }
1.2574 +
1.2575 +TGenIPIList GenIPIList;
1.2576 +
1.2577 +extern "C" {
1.2578 +extern void send_generic_ipis(TUint32);
1.2579 +
1.2580 +void generic_ipi_isr(TSubScheduler* aS)
1.2581 + {
1.2582 + TGenericIPI* ipi = aS->iNextIPI;
1.2583 + if (!ipi)
1.2584 + return;
1.2585 + TUint32 m = aS->iCpuMask;
1.2586 + SDblQueLink* anchor = &GenIPIList.iA;
1.2587 + while (ipi != anchor)
1.2588 + {
1.2589 + __e32_atomic_and_acq32(&ipi->iCpusIn, ~m);
1.2590 + (*ipi->iFunc)(ipi);
1.2591 + TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
1.2592 + TGenericIPI* n = (TGenericIPI*)ipi->iNext;
1.2593 + ipi->iCpusOut &= ~m;
1.2594 + if (ipi->iCpusOut == 0)
1.2595 + {
1.2596 + ipi->Deque();
1.2597 + mb();
1.2598 + ipi->iNext = 0;
1.2599 + }
1.2600 + ipi = n;
1.2601 + while (ipi!=anchor && !(ipi->iCpusIn & m))
1.2602 + ipi = (TGenericIPI*)ipi->iNext;
1.2603 + if (ipi == anchor)
1.2604 + aS->iNextIPI = 0;
1.2605 + GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
1.2606 + }
1.2607 + }
1.2608 +}
1.2609 +
1.2610 +void TGenericIPI::Queue(TGenericIPIFn aFunc, TUint32 aCpuMask)
1.2611 + {
1.2612 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI F=%08x M=%08x", aFunc, aCpuMask));
1.2613 + iFunc = aFunc;
1.2614 + TScheduler& s = TheScheduler;
1.2615 + TInt i;
1.2616 + TUint32 ipis = 0;
1.2617 + TInt irq = GenIPIList.iGenIPILock.LockIrqSave();
1.2618 + if (aCpuMask & 0x80000000u)
1.2619 + {
1.2620 + if (aCpuMask==0xffffffffu)
1.2621 + aCpuMask = s.iActiveCpus2;
1.2622 + else if (aCpuMask==0xfffffffeu)
1.2623 + aCpuMask = s.iActiveCpus2 &~ SubScheduler().iCpuMask;
1.2624 + else
1.2625 + aCpuMask = 0;
1.2626 + }
1.2627 + iCpusIn = aCpuMask;
1.2628 + iCpusOut = aCpuMask;
1.2629 + if (!aCpuMask)
1.2630 + {
1.2631 + GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
1.2632 + iNext = 0;
1.2633 + return;
1.2634 + }
1.2635 + GenIPIList.Add(this);
1.2636 + for (i=0; i<s.iNumCpus; ++i)
1.2637 + {
1.2638 + if (!(aCpuMask & (1<<i)))
1.2639 + continue;
1.2640 + TSubScheduler& ss = *s.iSub[i];
1.2641 + if (!ss.iNextIPI)
1.2642 + {
1.2643 + ss.iNextIPI = this;
1.2644 + ipis |= (1<<i);
1.2645 + }
1.2646 + }
1.2647 + send_generic_ipis(ipis);
1.2648 + GenIPIList.iGenIPILock.UnlockIrqRestore(irq);
1.2649 + __KTRACE_OPT(KSCHED2,DEBUGPRINT("GenIPI ipis=%08x", ipis));
1.2650 + }
1.2651 +
1.2652 +void TGenericIPI::QueueAll(TGenericIPIFn aFunc)
1.2653 + {
1.2654 + Queue(aFunc, 0xffffffffu);
1.2655 + }
1.2656 +
1.2657 +void TGenericIPI::QueueAllOther(TGenericIPIFn aFunc)
1.2658 + {
1.2659 + Queue(aFunc, 0xfffffffeu);
1.2660 + }
1.2661 +
1.2662 +// Call from thread or IDFC with interrupts enabled
1.2663 +void TGenericIPI::WaitEntry()
1.2664 + {
1.2665 + CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TGenericIPI::WaitEntry");
1.2666 + while (iCpusIn)
1.2667 + {
1.2668 + __chill();
1.2669 + }
1.2670 + mb();
1.2671 + }
1.2672 +
1.2673 +// Call from thread or IDFC with interrupts enabled
1.2674 +void TGenericIPI::WaitCompletion()
1.2675 + {
1.2676 + CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TGenericIPI::WaitCompletion");
1.2677 + volatile TInt* p = (volatile TInt*)&iNext;
1.2678 + while (*p)
1.2679 + {
1.2680 + __chill();
1.2681 + }
1.2682 + mb();
1.2683 + }
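+
+// Usage sketch (illustrative only): running a function on every other CPU via a
+// generic IPI and waiting for all of them to finish it. The derived class and the
+// per-CPU work are hypothetical; TStopIPI below is the in-tree example of this
+// pattern, used with the kernel locked.
+//
+//	class TExampleIPI : public TGenericIPI
+//		{
+//	public:
+//		void Run()
+//			{
+//			QueueAllOther(&Isr);	// send to every CPU except this one
+//			WaitCompletion();	// spin until all targets have run Isr()
+//			}
+//		static void Isr(TGenericIPI* aPtr)
+//			{
+//			DoPerCpuWork((TExampleIPI*)aPtr);	// hypothetical; runs on each target CPU
+//			}
+//		};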
1.2684 +
1.2685 +/** Stop all other CPUs
1.2686 +
1.2687 + Call with kernel locked
1.2688 +*/
1.2689 +void TStopIPI::StopCPUs()
1.2690 + {
1.2691 + iFlag = 0;
1.2692 + QueueAllOther(&Isr); // send IPIs to all other CPUs
1.2693 + WaitEntry(); // wait for other CPUs to reach the ISR
1.2694 + }
1.2695 +
1.2696 +void TStopIPI::ReleaseCPUs()
1.2697 + {
1.2698 + iFlag = 1; // allow other CPUs to proceed
1.2699 + WaitCompletion(); // wait for them to finish with this IPI
1.2700 + }
1.2701 +
1.2702 +void TStopIPI::Isr(TGenericIPI* a)
1.2703 + {
1.2704 + TStopIPI* s = (TStopIPI*)a;
1.2705 + while (!s->iFlag)
1.2706 + {
1.2707 + __chill();
1.2708 + }
1.2709 + }
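+
+// Usage sketch (illustrative only): using TStopIPI to hold every other CPU in a tight
+// loop while this CPU performs an operation that must not race with them. The nature
+// of the critical operation is hypothetical.
+//
+//	void ExampleStopTheWorld()
+//		{
+//		TStopIPI ipi;
+//		NKern::Lock();		// StopCPUs() must be called with the kernel locked
+//		ipi.StopCPUs();		// other CPUs are now spinning in TStopIPI::Isr()
+//		// ... perform the critical operation ...
+//		ipi.ReleaseCPUs();	// let them go and wait for the IPI to retire
+//		NKern::Unlock();
+//		}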
1.2710 +
1.2711 +