Update contrib.
// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nkerns.cpp
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cmn_private.h>

extern "C" void ExcFault(TAny*);

/******************************************************************************
 * Fast mutex
 ******************************************************************************/

/** Create a fast mutex
*/
EXPORT_C NFastMutex::NFastMutex()
	: iHoldingThread(0), iMutexLock(TSpinLock::EOrderFastMutex)
	{
	}
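
// Usage sketch (illustrative): a fast mutex protects a short critical
// section via NKern::FMWait()/FMSignal(); the holder must not block while
// holding it. TheLock and DoProtectedWork() are hypothetical names.
//
//	static NFastMutex TheLock;
//
//	void DoProtectedWork()
//		{
//		NKern::FMWait(&TheLock);	// acquire
//		// ... short, non-blocking critical section ...
//		NKern::FMSignal(&TheLock);	// release
//		}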

/******************************************************************************
 * NSchedulable
 ******************************************************************************/

NSchedulable::NSchedulable()
	: iSSpinLock(TSpinLock::EOrderThread)
	{
	iNSchedulableSpare1 = 0;
	iNSchedulableSpare2 = 0;
	iParent = (NSchedulable*)0xdeadbeef;
	new (i_IDfcMem) TDfc(&DeferredReadyIDfcFn, this);
	}

/******************************************************************************
 * NThreadGroup
 ******************************************************************************/

NThreadGroup::NThreadGroup()
	{
	new (&iSSpinLock) TSpinLock(TSpinLock::EOrderThreadGroup);
	}

/** Create a thread group
*/
EXPORT_C TInt NKern::GroupCreate(NThreadGroup* aGroup, SNThreadGroupCreateInfo& aInfo)
	{
	new (aGroup) NThreadGroup();
	aGroup->iCpuAffinity = aInfo.iCpuAffinity;
	return KErrNone;
	}

/** Destroy a thread group

	@pre Call in thread context, interrupts enabled, preemption enabled
	@pre No fast mutex held
	@pre Calling thread in critical section
	@pre All threads have left the group
*/
EXPORT_C void NKern::GroupDestroy(NThreadGroup* aGroup)
	{
	NKern::ThreadEnterCS();
	aGroup->DetachTiedEvents();
	NKern::ThreadLeaveCS();
	}
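
// Usage sketch (illustrative): the expected group lifecycle, assuming the
// KCpuAffinityAny constant for the affinity. All member threads must have
// exited before GroupDestroy() is called.
//
//	NThreadGroup group;
//	SNThreadGroupCreateInfo ginfo;
//	ginfo.iCpuAffinity = KCpuAffinityAny;
//	TInt r = NKern::GroupCreate(&group, ginfo);
//	// ... create member threads with SNThreadCreateInfo::iGroup = &group,
//	// run them, and wait for all of them to leave the group ...
//	NKern::GroupDestroy(&group);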

/******************************************************************************
 * Thread
 ******************************************************************************/

static const SFastExecTable DefaultFastExecTable={0,{0}};
static const SSlowExecTable DefaultSlowExecTable={0,(TLinAddr)InvalidExec,0,{{0,0}}};

const SNThreadHandlers NThread_Default_Handlers =
	{
	NTHREAD_DEFAULT_EXIT_HANDLER,
	NTHREAD_DEFAULT_STATE_HANDLER,
	NTHREAD_DEFAULT_EXCEPTION_HANDLER,
	NTHREAD_DEFAULT_TIMEOUT_HANDLER
	};

NThreadWaitState::NThreadWaitState()
	: iTimer(&TimerExpired, this)
	{
	iTimer.iTriggerTime = 0;
	iTimer.iNTimerSpare1 = 0;
	}

NThreadBase::NThreadBase()
	: iRequestSemaphore(), iWaitState()
	{
	iWaitLink.iPriority = 0;
	i_NThread_Initial = 0;
	iLinkedObjType = EWaitNone;
	iNThreadBaseSpare10 = 0;
	iRequestSemaphore.iOwningThread = (NThreadBase*)this;
	iUserModeCallbacks = 0;
	iExtraContextSize = 0;
	iNThreadBaseSpare6 = 0;
	iNThreadBaseSpare7 = 0;
	iNThreadBaseSpare8 = 0;
	iNThreadBaseSpare9 = 0;
	}

TInt NThreadBase::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NThreadBase::Create %08x(%08x,%d)", this, &aInfo, aInitial));
	if (aInfo.iPriority<0 || aInfo.iPriority>63)
		return KErrArgument;
	if (aInfo.iPriority==0 && !aInitial)
		return KErrArgument;
//	if (aInfo.iCpu!=KCpuAny && aInfo.iCpu>=TheScheduler.iNumCpus)
//		return KErrArgument;
	iStackBase=(TLinAddr)aInfo.iStackBase;
	iStackSize=aInfo.iStackSize;
	iTimeslice=(aInfo.iTimeslice>0)?aInfo.iTimeslice:-1;
	iPriority=TUint8(aInfo.iPriority);
	iBasePri=TUint8(aInfo.iPriority);
	iCpuAffinity = aInfo.iCpuAffinity;
	iHandlers = aInfo.iHandlers ? aInfo.iHandlers : &NThread_Default_Handlers;
	iFastExecTable=aInfo.iFastExecTable?aInfo.iFastExecTable:&DefaultFastExecTable;
	iSlowExecTable=(aInfo.iSlowExecTable?aInfo.iSlowExecTable:&DefaultSlowExecTable)->iEntries;
	i_ThrdAttr=(TUint8)aInfo.iAttributes;
	if (aInitial)
		{
		TSubScheduler& ss = SubScheduler();
		iLastCpu = (TUint8)ss.iCpuNum;
		iReady = (TUint8)(iLastCpu | EReadyOffset);
		iCpuAffinity = iLastCpu;
		iEventState = (iLastCpu<<EEventCpuShift) | (iLastCpu<<EThreadCpuShift);
		i_NThread_Initial = TRUE;
		ss.iInitialThread = (NThread*)this;
		NKern::Unlock();	// now that current thread is defined
		}
	else
		{
		TInt ecpu;
		if (iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
			{
			ecpu = __e32_find_ls1_32(iCpuAffinity);
			if (ecpu >= TheScheduler.iNumCpus)
				ecpu = 0;	// FIXME: Inactive CPU?
			}
		else
			ecpu = iCpuAffinity;
		iEventState = (ecpu<<EEventCpuShift) | (ecpu<<EThreadCpuShift);
		}
	if (aInfo.iGroup)
		{
		aInfo.iGroup->AcqSLock();
		iParent = (NSchedulable*)aInfo.iGroup;
		++aInfo.iGroup->iThreadCount;
		iEventState |= EEventParent;
		aInfo.iGroup->RelSLock();
		}
	__KTRACE_OPT(KNKERN,DEBUGPRINT("<NThreadBase::Create OK"));
	return KErrNone;
	}

void NThread_Default_State_Handler(NThread* __DEBUG_ONLY(aThread), TInt __DEBUG_ONLY(aOperation), TInt __DEBUG_ONLY(aParameter))
	{
#ifdef _DEBUG
//	__KTRACE_OPT(KPANIC,DEBUGPRINT("Unknown NState %d: thread %T op %08x par %08x",aThread,aThread->iNState,aOperation,aParameter));
	DEBUGPRINT("UnknownState: thread %T op %08x par %08x",aThread,aOperation,aParameter);
#endif
	FAULT();
	}

void NThread_Default_Exception_Handler(TAny* aContext, NThread*)
	{
	ExcFault(aContext);
	}

/** Create a nanothread.

	This function is intended to be used by the EPOC kernel and by personality
	layers. A nanothread may not use most of the functions available to normal
	Symbian OS threads. Use Kern::ThreadCreate() to create a Symbian OS thread.

	@param aThread Pointer to control block for thread to create.
	@param aInfo Information needed for creating the thread.

	@see SNThreadCreateInfo
	@see Kern::ThreadCreate

	@pre Call in a thread context.
	@pre Interrupts must be enabled.
	@pre Kernel must be unlocked.
*/
EXPORT_C TInt NKern::ThreadCreate(NThread* aThread, SNThreadCreateInfo& aInfo)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadCreate");
	return aThread->Create(aInfo,FALSE);
	}
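
// Usage sketch (illustrative): filling in SNThreadCreateInfo for a plain
// kernel-side thread. WorkerEntryPoint, the stack arguments and the priority
// value are hypothetical; the info block is zeroed first so unset fields
// pick up the defaults above (handlers, exec tables).
//
//	TInt CreateWorker(NThread* aT, TAny* aStack, TInt aStackSize)
//		{
//		SNThreadCreateInfo info;
//		memclr(&info, sizeof(info));
//		info.iFunction = &WorkerEntryPoint;	// thread entry point
//		info.iStackBase = aStack;
//		info.iStackSize = aStackSize;
//		info.iPriority = 20;
//		info.iTimeslice = -1;			// not timesliced
//		info.iCpuAffinity = KCpuAffinityAny;
//		return NKern::ThreadCreate(aT, info);
//		}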

// User-mode callbacks

TUserModeCallback::TUserModeCallback(TUserModeCallbackFunc aFunc)
	: iNext(KUserModeCallbackUnqueued),
	  iFunc(aFunc)
	{
	}

TUserModeCallback::~TUserModeCallback()
	{
	__NK_ASSERT_DEBUG(iNext == KUserModeCallbackUnqueued);
	}

void NKern::CancelUserModeCallbacks()
	{
	// Call any queued callbacks with the EUserModeCallbackCancel reason code, in the current
	// thread.
	TUserModeCallback* listHead =
		(TUserModeCallback*)__e32_atomic_swp_ord_ptr(&NCurrentThread()->iUserModeCallbacks, NULL);
	while (listHead)
		{
		TUserModeCallback* callback = listHead;
		listHead = listHead->iNext;
		callback->iNext = KUserModeCallbackUnqueued;
		__e32_memory_barrier();
		callback->iFunc(callback, EUserModeCallbackCancel);
		}
	}
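
// Usage sketch (illustrative): queueing a callback so it runs when the
// current thread next returns to user mode, or is cancelled as above if the
// queue is torn down first. Assumes the companion
// NKern::QueueUserModeCallback() API; MyCallbackFn is a hypothetical name.
//
//	void MyCallbackFn(TUserModeCallback* aCb, TUserModeCallbackReason aReason)
//		{
//		// aReason == EUserModeCallbackRun on normal delivery,
//		// EUserModeCallbackCancel when the queue is cancelled.
//		}
//
//	TUserModeCallback cb(&MyCallbackFn);
//	NKern::QueueUserModeCallback(NKern::CurrentThread(), &cb);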

void NKern::MoveUserModeCallbacks(NThreadBase* aDestThread, NThreadBase* aSrcThread)
	{
	// Move all queued user-mode callbacks from the source thread to the destination thread, and
	// prevent any more from being queued. Used by the kernel thread code so that callbacks get
	// cancelled in another thread if the thread they were originally queued on dies.

	// Atomically remove list of callbacks and set pointer to 1
	// The latter ensures any subsequent attempts to add callbacks fail
	TUserModeCallback* sourceListStart =
		(TUserModeCallback*)__e32_atomic_swp_ord_ptr(&aSrcThread->iUserModeCallbacks, (TAny*)1);
	__NK_ASSERT_DEBUG(((TUint)sourceListStart & 3) == 0); // check this only gets called once per thread

	if (sourceListStart == NULL)
		return;

	TUserModeCallback* sourceListEnd = sourceListStart;
	while (sourceListEnd->iNext != NULL)
		sourceListEnd = sourceListEnd->iNext;

	// Lock-free splice: link the whole source list ahead of the destination
	// list head, retrying if the head moves under us.
	TUserModeCallback* destListStart = aDestThread->iUserModeCallbacks;
	do
		{
		__NK_ASSERT_DEBUG(((TUint)destListStart & 3) == 0); // dest thread must not die
		sourceListEnd->iNext = destListStart;
		} while (!__e32_atomic_cas_ord_ptr(&aDestThread->iUserModeCallbacks, &destListStart, sourceListStart));
	}

/** Initialise the null thread
*/
void NKern::Init(NThread* aThread, SNThreadCreateInfo& aInfo)
	{
	aInfo.iFunction=NULL;		// irrelevant
	aInfo.iPriority=0;		// null thread has lowest priority
	aInfo.iTimeslice=0;		// null thread not timesliced
	aInfo.iAttributes=0;		// null thread does not require implicit locks
	aThread->Create(aInfo,TRUE);	// create the null thread
	}

/** @internalTechnology */
EXPORT_C void NKern::RecordIntLatency(TInt /*aLatency*/, TInt /*aIntMask*/)
	{
	}

/** @internalTechnology */
EXPORT_C void NKern::RecordThreadLatency(TInt /*aLatency*/)
	{
	}

/********************************************
 * Deterministic Priority List Implementation
 ********************************************/

/** Construct a priority list with the specified number of priorities

	@param aNumPriorities The number of priorities (must be 1-64).
*/
EXPORT_C TPriListBase::TPriListBase(TInt aNumPriorities)
	{
	memclr(this, sizeof(TPriListBase)+(aNumPriorities-1)*sizeof(SDblQueLink*) );
	}
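
// Usage sketch (illustrative): the list keeps one doubly-linked queue per
// priority plus a presence bitmap, so the highest occupied priority is found
// in constant time. TPriList is the fixed-size templated wrapper.
//
//	TPriList<TPriListLink, 64> list;
//	TPriListLink link(10);			// an entry at priority 10
//	list.Add(&link);
//	TInt hi = list.HighestPriority();	// O(1) via the bitmap
//	TPriListLink* first = list.First();	// first entry at that priority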

/********************************************
 * Miscellaneous
 ********************************************/

/** Get the current value of the high performance counter.

	If a high performance counter is not available, this uses the millisecond
	tick count instead.
*/
EXPORT_C TUint32 NKern::FastCounter()
	{
	return (TUint32)Timestamp();
	}

/** Get the frequency of counter queried by NKern::FastCounter().
*/
EXPORT_C TInt NKern::FastCounterFrequency()
	{
	return (TInt)TimestampFrequency();
	}
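
// Usage sketch (illustrative): timing a short operation with the fast
// counter. Unsigned subtraction handles counter wrap; DoSomething() is a
// hypothetical workload.
//
//	TUint32 start = NKern::FastCounter();
//	DoSomething();
//	TUint32 ticks = NKern::FastCounter() - start;
//	TUint64 us = ((TUint64)ticks * 1000000) / NKern::FastCounterFrequency();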

EXPORT_C TBool NKern::Crashed()
	{
	return CrashState!=0;
	}

/** Returns number of nanokernel timer ticks since system started.
*/
EXPORT_C TUint32 NKern::TickCount()
	{
	return NTickCount();
	}

TUint32 BTrace::BigTraceId = 0;

TBool BTrace::DoOutBig(TUint32 a0, TUint32 a1, const TAny* aData, TInt aDataSize, TUint32 aContext, TUint32 aPc)
	{
	SBTraceData& traceData = BTraceData;
	// see if trace is small enough to fit in single record...
	if(TUint(aDataSize)<=TUint(KMaxBTraceDataArray+4))
		{
		TUint32 a2 = 0;
		TUint32 a3 = 0;
		if(aDataSize)
			{
			a2 = *((TUint32*&)aData)++; // first 4 bytes into a2
			if(aDataSize>=4 && aDataSize<=8)
				a3 = *(TUint32*)aData; // only 4 more bytes, so pass by value, not pointer
			else if(aDataSize>8)
				a3 = (TUint32)aData; // more data, so pass by pointer
			}
		__ACQUIRE_BTRACE_LOCK();
		TBool r = traceData.iHandler(a0,0,aContext,a1,a2,a3,0,aPc);
		__RELEASE_BTRACE_LOCK();
		return r;
		}

	// adjust for header2, extra, and size word...
	a0 |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);

	TUint32 traceId = __e32_atomic_add_ord32(&BigTraceId, 1);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt offset = 0;
	do
		{
		TUint32 size = aDataSize-offset;
		if(size>KMaxBTraceDataArray)
			size = KMaxBTraceDataArray;
		else
			header2 = BTrace::EMultipartLast;
		if(size<=4)
			*(TUint32*)&aData = *(TUint32*)aData; // 4 bytes or less are passed by value, not pointer
		__ACQUIRE_BTRACE_LOCK();
		TBool result = traceData.iHandler(a0+size,header2,aContext,aDataSize,a1,(TUint32)aData,traceId,aPc);
		__RELEASE_BTRACE_LOCK();
		if(!result)
			return result;
		offset += size;
		*(TUint8**)&aData += size;
		header2 = BTrace::EMultipartMiddle;
		}
	while(offset<aDataSize);

	return TRUE;
	}
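
// Usage sketch (illustrative): DoOutBig() is normally reached through the
// BTraceBig() macro in e32btrace.h; a payload too large for one record is
// emitted as EMultipartFirst/Middle/Last fragments sharing one trace id.
//
//	TUint8 buf[1024];
//	// ... fill buf ...
//	BTraceBig(BTrace::ETest1, 0, 0, buf, sizeof(buf));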

EXPORT_C TSpinLock* BTrace::LockPtr()
	{
#ifdef __USE_BTRACE_LOCK__