Update contrib.
1 // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\nkernsmp\arm\ncthrd.cpp
18 // NThreadBase member data
19 #define __INCLUDE_NTHREADBASE_DEFINES__
21 #define __INCLUDE_REG_OFFSETS__
28 const TInt KNThreadMinStackSize = 0x100; // needs to be enough for interrupt + reschedule stack
30 // Called by a thread when it first runs
31 extern "C" void __StartThread();
33 // Initialise CPU registers
34 extern void initialiseState(TInt aCpu, TSubScheduler* aSS);
36 extern "C" void ExcFault(TAny*);
38 extern TUint32 __mpid();
39 extern void InitAPTimestamp(SNThreadCreateInfo& aInfo);
41 TInt NThread::Create(SNThreadCreateInfo& aInfo, TBool aInitial)
43 // Assert ParameterBlockSize is not negative and is a multiple of 8 bytes
44 __NK_ASSERT_ALWAYS((aInfo.iParameterBlockSize&0x80000007)==0);
45 __NK_ASSERT_ALWAYS(aInfo.iStackBase && aInfo.iStackSize>=aInfo.iParameterBlockSize+KNThreadMinStackSize);
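// The mask test above is equivalent to requiring
//   aInfo.iParameterBlockSize >= 0 && (aInfo.iParameterBlockSize & 7) == 0
// i.e. the sign bit (bit 31) and the low three bits must all be clear.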
50 cpu = __e32_atomic_add_ord32(&TheScheduler.iNumCpus, 1);
51 aInfo.iCpuAffinity = cpu;
52 // OK since we can't migrate yet
53 TSubScheduler& ss = TheSubSchedulers[cpu];
54 ss.iCurrentThread = this;
55 iRunCount64 = UI64LIT(1);
56 __KTRACE_OPT(KBOOT,DEBUGPRINT("Init: cpu=%d ss=%08x", cpu, &ss));
59 initialiseState(cpu,&ss);
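// The per-CPU local timer is configured next: the watchdog is taken out of
// watchdog mode (the two writes below form the controller's disable sequence),
// any pending timer/watchdog events are cleared, and the timer is started with
// interrupt generation and auto-reload enabled.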
61 ArmLocalTimer& T = LOCAL_TIMER;
62 T.iWatchdogDisable = E_ArmTmrWDD_1;
63 T.iWatchdogDisable = E_ArmTmrWDD_2;
65 T.iTimerIntStatus = E_ArmTmrIntStatus_Event;
67 T.iWatchdogIntStatus = E_ArmTmrIntStatus_Event;
70 T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;
72 __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus1, 1<<cpu);
73 __e32_atomic_ior_ord32(&TheScheduler.iActiveCpus2, 1<<cpu);
74 __e32_atomic_ior_ord32(&TheScheduler.iCpusNotIdle, 1<<cpu);
75 __KTRACE_OPT(KBOOT,DEBUGPRINT("AP MPID=%08x",__mpid()));
79 Arm::DefaultDomainAccess = Arm::Dacr();
80 Arm::ModifyCar(0, 0x00f00000); // full access to CP10, CP11
81 Arm::DefaultCoprocessorAccess = Arm::Car();
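// 0x00f00000 covers CPACR bits [23:20], the access fields for CP10 and CP11
// (the VFP/NEON coprocessors); setting both fields grants full access from
// privileged and user code.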
84 TInt r=NThreadBase::Create(aInfo,aInitial);
90 TLinAddr stack_top = (TLinAddr)iStackBase + (TLinAddr)iStackSize;
91 TLinAddr sp = stack_top;
92 TUint32 pb = (TUint32)aInfo.iParameterBlock;
93 SThreadStackStub* tss = 0;
94 if (aInfo.iParameterBlockSize)
96 tss = (SThreadStackStub*)stack_top;
98 tss->iExcCode = SThreadExcStack::EStub;
102 sp -= (TLinAddr)aInfo.iParameterBlockSize;
103 wordmove((TAny*)sp, aInfo.iParameterBlock, aInfo.iParameterBlockSize);
107 SThreadInitStack* tis = (SThreadInitStack*)sp;
109 memclr(tis, sizeof(SThreadInitStack));
110 iSavedSP = (TLinAddr)tis;
112 tis->iR.iFpExc = VFP_FPEXC_THRD_INIT;
114 tis->iR.iCar = Arm::DefaultCoprocessorAccess;
115 tis->iR.iDacr = Arm::DefaultDomainAccess;
116 tis->iR.iSpsrSvc = MODE_SVC;
117 tis->iR.iSPRschdFlg = TLinAddr(&tis->iX) | 1;
118 tis->iR.iR15 = (TUint32)&__StartThread;
121 tis->iX.iR4 = (TUint32)this;
122 tis->iX.iR11 = stack_top;
123 tis->iX.iExcCode = SThreadExcStack::EInit;
124 tis->iX.iR15 = (TUint32)aInfo.iFunction;
125 tis->iX.iCPSR = MODE_SVC;
129 NKern::EnableAllInterrupts();
132 ArmLocalTimer& T = LOCAL_TIMER;
133 T.iTimerCtrl = E_ArmTmrCtrl_IntEn | E_ArmTmrCtrl_Reload | E_ArmTmrCtrl_Enable;
135 // synchronize AP's timestamp with BP's
137 InitAPTimestamp(aInfo);
139 #ifdef BTRACE_THREAD_IDENTIFICATION
140 BTrace4(BTrace::EThreadIdentification,BTrace::ENanoThreadCreate,this);
146 /** Called from generic layer when thread is killed asynchronously.
148 For ARM, save reason for last user->kernel switch (if any) so that user
149 context can be accessed from EDebugEventRemoveThread hook. Must be done
150 before forcing the thread to exit, as doing so alters the saved return address,
151 which is used to figure out where the context is saved.
156 void NThreadBase::OnKill()
160 /** Called from generic layer when thread exits.
162 For ARM, if the thread terminates synchronously, record that the last
163 user->kernel switch was an exec call. Do nothing for a non-user thread or if the
164 reason was already saved in OnKill().
170 void NThreadBase::OnExit()
175 void DumpExcInfo(TArmExcInfo& a)
177 DEBUGPRINT("Exc %1d Cpsr=%08x FAR=%08x FSR=%08x",a.iExcCode,a.iCpsr,a.iFaultAddress,a.iFaultStatus);
178 DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x",a.iR0,a.iR1,a.iR2,a.iR3);
179 DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x",a.iR4,a.iR5,a.iR6,a.iR7);
180 DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x",a.iR8,a.iR9,a.iR10,a.iR11);
181 DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x",a.iR12,a.iR13,a.iR14,a.iR15);
182 DEBUGPRINT("R13Svc=%08x R14Svc=%08x SpsrSvc=%08x",a.iR13Svc,a.iR14Svc,a.iSpsrSvc);
184 TInt irq = NKern::DisableAllInterrupts();
185 TSubScheduler& ss = SubScheduler();
186 NThreadBase* ct = ss.iCurrentThread;
187 TInt inc = TInt(ss.i_IrqNestCount);
188 TInt cpu = ss.iCpuNum;
189 TInt klc = ss.iKernLockCount;
190 NKern::RestoreInterrupts(irq);
191 DEBUGPRINT("Thread %T, CPU %d, KLCount=%d, IrqNest=%d", ct, cpu, klc, inc);
194 void DumpFullRegSet(SFullArmRegSet& a)
196 SNormalRegs& r = a.iN;
197 DEBUGPRINT("MODE_USR:");
198 DEBUGPRINT(" R0=%08x R1=%08x R2=%08x R3=%08x", r.iR0, r.iR1, r.iR2, r.iR3);
199 DEBUGPRINT(" R4=%08x R5=%08x R6=%08x R7=%08x", r.iR4, r.iR5, r.iR6, r.iR7);
200 DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8, r.iR9, r.iR10, r.iR11);
201 DEBUGPRINT("R12=%08x R13=%08x R14=%08x R15=%08x", r.iR12, r.iR13, r.iR14, r.iR15);
202 DEBUGPRINT("CPSR=%08x", r.iFlags);
203 DEBUGPRINT("MODE_FIQ:");
204 DEBUGPRINT(" R8=%08x R9=%08x R10=%08x R11=%08x", r.iR8Fiq, r.iR9Fiq, r.iR10Fiq, r.iR11Fiq);
205 DEBUGPRINT("R12=%08x R13=%08x R14=%08x SPSR=%08x", r.iR12Fiq, r.iR13Fiq, r.iR14Fiq, r.iSpsrFiq);
206 DEBUGPRINT("MODE_IRQ:");
207 DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Irq, r.iR14Irq, r.iSpsrIrq);
208 DEBUGPRINT("MODE_SVC:");
209 DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Svc, r.iR14Svc, r.iSpsrSvc);
210 DEBUGPRINT("MODE_ABT:");
211 DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Abt, r.iR14Abt, r.iSpsrAbt);
212 DEBUGPRINT("MODE_UND:");
213 DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Und, r.iR14Und, r.iSpsrUnd);
214 // DEBUGPRINT("MODE_MON:");
215 // DEBUGPRINT("R13=%08x R14=%08x SPSR=%08x", r.iR13Mon, r.iR14Mon, r.iSpsrMon);
217 SAuxiliaryRegs& aux = a.iA;
218 DEBUGPRINT("TEEHBR=%08x CPACR=%08x", aux.iTEEHBR, aux.iCPACR);
220 SBankedRegs& b = a.iB[0];
221 DEBUGPRINT(" SCTLR=%08x ACTLR=%08x PRRR=%08x NMRR=%08x", b.iSCTLR, b.iACTLR, b.iPRRR, b.iNMRR);
222 DEBUGPRINT(" DACR=%08x TTBR0=%08x TTBR1=%08x TTBCR=%08x", b.iDACR, b.iTTBR0, b.iTTBR1, b.iTTBCR);
223 DEBUGPRINT(" VBAR=%08x FCSEID=%08x CTXIDR=%08x", b.iVBAR, b.iFCSEIDR, b.iCTXIDR);
224 DEBUGPRINT("Thread ID RWRW=%08x RWRO=%08x RWNO=%08x", b.iRWRWTID, b.iRWROTID, b.iRWNOTID);
225 DEBUGPRINT(" DFSR=%08x DFAR=%08x IFSR=%08x IFAR=%08x", b.iDFSR, b.iDFAR, b.iIFSR, b.iIFAR);
226 DEBUGPRINT(" ADFSR=%08x AIFSR=%08x", b.iADFSR, b.iAIFSR);
228 DEBUGPRINT("FPEXC %08x", a.iMore[0]);
230 DEBUGPRINT("ExcCode %08x", a.iExcCode);
234 #define CONTEXT_ELEMENT_UNDEFINED(val) \
236 TArmContextElement::EUndefined, \
242 #define CONTEXT_ELEMENT_EXCEPTION(reg) \
244 TArmContextElement::EOffsetFromStackTop, \
245 ((sizeof(SThreadExcStack)-_FOFF(SThreadExcStack,reg))>>2), \
250 #define CONTEXT_ELEMENT_RESCHED(reg) \
252 TArmContextElement::EOffsetFromSp, \
253 (_FOFF(SThreadReschedStack,reg)>>2), \
258 #define CONTEXT_ELEMENT_RESCHED_SP() \
260 TArmContextElement::EOffsetFromSpBic3, \
261 (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2), \
266 #define CONTEXT_ELEMENT_RESCHED_SP_PLUS(offset) \
268 TArmContextElement::EOffsetFromSpBic3_1, \
269 (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2), \
274 #define CONTEXT_ELEMENT_RESCHED_SP_OFFSET(offset) \
276 TArmContextElement::EOffsetFromSpBic3_2, \
277 (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2), \
282 #define CONTEXT_ELEMENT_RESCHED_IRQ(reg) \
284 TArmContextElement::EOffsetFromSpBic3_2, \
285 (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2), \
286 ((_FOFF(SThreadIrqStack,reg)-sizeof(SThreadReschedStack))>>2), \
290 #define CONTEXT_ELEMENT_RESCHED_INIT(reg) \
292 TArmContextElement::EOffsetFromSpBic3_2, \
293 (_FOFF(SThreadReschedStack,iSPRschdFlg)>>2), \
294 ((_FOFF(SThreadInitStack,reg)-sizeof(SThreadReschedStack))>>2), \
299 const TArmContextElement ContextTableException[] =
301 CONTEXT_ELEMENT_EXCEPTION(iR0),
302 CONTEXT_ELEMENT_EXCEPTION(iR1),
303 CONTEXT_ELEMENT_EXCEPTION(iR2),
304 CONTEXT_ELEMENT_EXCEPTION(iR3),
305 CONTEXT_ELEMENT_EXCEPTION(iR4),
306 CONTEXT_ELEMENT_EXCEPTION(iR5),
307 CONTEXT_ELEMENT_EXCEPTION(iR6),
308 CONTEXT_ELEMENT_EXCEPTION(iR7),
309 CONTEXT_ELEMENT_EXCEPTION(iR8),
310 CONTEXT_ELEMENT_EXCEPTION(iR9),
311 CONTEXT_ELEMENT_EXCEPTION(iR10),
312 CONTEXT_ELEMENT_EXCEPTION(iR11),
313 CONTEXT_ELEMENT_EXCEPTION(iR12),
314 CONTEXT_ELEMENT_EXCEPTION(iR13usr),
315 CONTEXT_ELEMENT_EXCEPTION(iR14usr),
316 CONTEXT_ELEMENT_EXCEPTION(iR15),
317 CONTEXT_ELEMENT_EXCEPTION(iCPSR),
318 CONTEXT_ELEMENT_UNDEFINED(0),
321 const TArmContextElement ContextTableUndefined[] =
323 CONTEXT_ELEMENT_UNDEFINED(0),
324 CONTEXT_ELEMENT_UNDEFINED(0),
325 CONTEXT_ELEMENT_UNDEFINED(0),
326 CONTEXT_ELEMENT_UNDEFINED(0),
327 CONTEXT_ELEMENT_UNDEFINED(0),
328 CONTEXT_ELEMENT_UNDEFINED(0),
329 CONTEXT_ELEMENT_UNDEFINED(0),
330 CONTEXT_ELEMENT_UNDEFINED(0),
331 CONTEXT_ELEMENT_UNDEFINED(0),
332 CONTEXT_ELEMENT_UNDEFINED(0),
333 CONTEXT_ELEMENT_UNDEFINED(0),
334 CONTEXT_ELEMENT_UNDEFINED(0),
335 CONTEXT_ELEMENT_UNDEFINED(0),
336 CONTEXT_ELEMENT_UNDEFINED(0),
337 CONTEXT_ELEMENT_UNDEFINED(0),
338 CONTEXT_ELEMENT_UNDEFINED(0),
339 CONTEXT_ELEMENT_UNDEFINED(EUserMode),
340 CONTEXT_ELEMENT_UNDEFINED(0),
343 // Table used for non-dying threads which have been preempted by an interrupt
344 // while in user mode.
345 const TArmContextElement ContextTableUserInterrupt[] =
347 CONTEXT_ELEMENT_EXCEPTION(iR0),
348 CONTEXT_ELEMENT_EXCEPTION(iR1),
349 CONTEXT_ELEMENT_EXCEPTION(iR2),
350 CONTEXT_ELEMENT_EXCEPTION(iR3),
351 CONTEXT_ELEMENT_EXCEPTION(iR4),
352 CONTEXT_ELEMENT_EXCEPTION(iR5),
353 CONTEXT_ELEMENT_EXCEPTION(iR6),
354 CONTEXT_ELEMENT_EXCEPTION(iR7),
355 CONTEXT_ELEMENT_EXCEPTION(iR8),
356 CONTEXT_ELEMENT_EXCEPTION(iR9),
357 CONTEXT_ELEMENT_EXCEPTION(iR10),
358 CONTEXT_ELEMENT_EXCEPTION(iR11),
359 CONTEXT_ELEMENT_EXCEPTION(iR12),
360 CONTEXT_ELEMENT_EXCEPTION(iR13usr),
361 CONTEXT_ELEMENT_EXCEPTION(iR14usr),
362 CONTEXT_ELEMENT_EXCEPTION(iR15),
363 CONTEXT_ELEMENT_EXCEPTION(iCPSR),
364 CONTEXT_ELEMENT_UNDEFINED(0),
367 // Table used for threads which have been preempted by an interrupt while in
368 // supervisor mode in the SWI handler either before the return address was
369 // saved or after the registers were restored.
370 const TArmContextElement ContextTableSvsrInterrupt1[] =
372 CONTEXT_ELEMENT_EXCEPTION(iR0),
373 CONTEXT_ELEMENT_EXCEPTION(iR1),
374 CONTEXT_ELEMENT_EXCEPTION(iR2),
375 CONTEXT_ELEMENT_EXCEPTION(iR3),
376 CONTEXT_ELEMENT_EXCEPTION(iR4),
377 CONTEXT_ELEMENT_EXCEPTION(iR5),
378 CONTEXT_ELEMENT_EXCEPTION(iR6),
379 CONTEXT_ELEMENT_EXCEPTION(iR7),
380 CONTEXT_ELEMENT_EXCEPTION(iR8),
381 CONTEXT_ELEMENT_EXCEPTION(iR9),
382 CONTEXT_ELEMENT_EXCEPTION(iR10),
383 CONTEXT_ELEMENT_EXCEPTION(iR11),
384 CONTEXT_ELEMENT_EXCEPTION(iR12),
385 CONTEXT_ELEMENT_EXCEPTION(iR13usr),
386 CONTEXT_ELEMENT_EXCEPTION(iR14usr),
387 CONTEXT_ELEMENT_EXCEPTION(iR15),
388 CONTEXT_ELEMENT_UNDEFINED(EUserMode), // can't get flags so just use 'user mode'
389 CONTEXT_ELEMENT_UNDEFINED(0),
392 // Table used for non-dying threads blocked on their request semaphore.
393 const TArmContextElement ContextTableWFAR[] =
395 CONTEXT_ELEMENT_EXCEPTION(iR0),
396 CONTEXT_ELEMENT_EXCEPTION(iR1),
397 CONTEXT_ELEMENT_EXCEPTION(iR2),
398 CONTEXT_ELEMENT_EXCEPTION(iR3),
399 CONTEXT_ELEMENT_EXCEPTION(iR4),
400 CONTEXT_ELEMENT_EXCEPTION(iR5),
401 CONTEXT_ELEMENT_EXCEPTION(iR6),
402 CONTEXT_ELEMENT_EXCEPTION(iR7),
403 CONTEXT_ELEMENT_EXCEPTION(iR8),
404 CONTEXT_ELEMENT_EXCEPTION(iR9),
405 CONTEXT_ELEMENT_EXCEPTION(iR10),
406 CONTEXT_ELEMENT_EXCEPTION(iR11),
407 CONTEXT_ELEMENT_EXCEPTION(iR12),
408 CONTEXT_ELEMENT_EXCEPTION(iR13usr),
409 CONTEXT_ELEMENT_EXCEPTION(iR14usr),
410 CONTEXT_ELEMENT_EXCEPTION(iR15),
411 CONTEXT_ELEMENT_EXCEPTION(iCPSR),
412 CONTEXT_ELEMENT_UNDEFINED(0),
415 const TArmContextElement ContextTableExec[] =
417 CONTEXT_ELEMENT_EXCEPTION(iR0),
418 CONTEXT_ELEMENT_EXCEPTION(iR1),
419 CONTEXT_ELEMENT_EXCEPTION(iR2),
420 CONTEXT_ELEMENT_EXCEPTION(iR3),
421 CONTEXT_ELEMENT_EXCEPTION(iR4),
422 CONTEXT_ELEMENT_EXCEPTION(iR5),
423 CONTEXT_ELEMENT_EXCEPTION(iR6),
424 CONTEXT_ELEMENT_EXCEPTION(iR7),
425 CONTEXT_ELEMENT_EXCEPTION(iR8),
426 CONTEXT_ELEMENT_EXCEPTION(iR9),
427 CONTEXT_ELEMENT_EXCEPTION(iR10),
428 CONTEXT_ELEMENT_EXCEPTION(iR11),
429 CONTEXT_ELEMENT_EXCEPTION(iR12),
430 CONTEXT_ELEMENT_EXCEPTION(iR13usr),
431 CONTEXT_ELEMENT_EXCEPTION(iR14usr),
432 CONTEXT_ELEMENT_EXCEPTION(iR15),
433 CONTEXT_ELEMENT_EXCEPTION(iCPSR),
434 CONTEXT_ELEMENT_UNDEFINED(0),
437 // Table used to retrieve a thread's kernel side context at the point where
438 // Reschedule() returns.
439 // Used for kernel threads.
440 const TArmContextElement ContextTableKernel[] =
442 CONTEXT_ELEMENT_UNDEFINED(0),
443 CONTEXT_ELEMENT_UNDEFINED(0),
444 CONTEXT_ELEMENT_UNDEFINED(0),
445 CONTEXT_ELEMENT_UNDEFINED(0),
446 CONTEXT_ELEMENT_UNDEFINED(0),
447 CONTEXT_ELEMENT_UNDEFINED(0),
448 CONTEXT_ELEMENT_UNDEFINED(0),
449 CONTEXT_ELEMENT_UNDEFINED(0),
450 CONTEXT_ELEMENT_UNDEFINED(0),
451 CONTEXT_ELEMENT_UNDEFINED(0),
452 CONTEXT_ELEMENT_UNDEFINED(0),
453 CONTEXT_ELEMENT_UNDEFINED(0),
454 CONTEXT_ELEMENT_UNDEFINED(0),
455 CONTEXT_ELEMENT_RESCHED_SP(), // supervisor stack pointer before reschedule
456 CONTEXT_ELEMENT_UNDEFINED(0), // supervisor lr is unknown
457 CONTEXT_ELEMENT_RESCHED(iR15), // return address from reschedule
458 CONTEXT_ELEMENT_RESCHED(iR15), // return address from reschedule
459 CONTEXT_ELEMENT_UNDEFINED(ESvcMode), // can't get flags so just use 'svc mode'
459 CONTEXT_ELEMENT_UNDEFINED(0),
462 // Table used to retrieve a thread's kernel side context at the point where
463 // NKern::Unlock() or NKern::PreemptionPoint() returns.
464 // Used for kernel threads.
465 const TArmContextElement ContextTableKernel1[] =
467 CONTEXT_ELEMENT_UNDEFINED(0),
468 CONTEXT_ELEMENT_UNDEFINED(0),
469 CONTEXT_ELEMENT_UNDEFINED(0),
470 CONTEXT_ELEMENT_UNDEFINED(0),
471 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(4),
472 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(8),
473 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(12),
474 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(16),
475 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(20),
476 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(24),
477 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(28),
478 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(32),
479 CONTEXT_ELEMENT_UNDEFINED(0),
480 CONTEXT_ELEMENT_RESCHED_SP_PLUS(40),
481 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
482 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
483 CONTEXT_ELEMENT_UNDEFINED(ESvcMode),
484 CONTEXT_ELEMENT_UNDEFINED(0),
487 // Table used to retrieve a thread's kernel side context at the point where
488 // NKern::FSWait() or NKern::WaitForAnyRequest() returns.
489 // Used for kernel threads.
490 const TArmContextElement ContextTableKernel2[] =
492 CONTEXT_ELEMENT_UNDEFINED(0),
493 CONTEXT_ELEMENT_UNDEFINED(0),
494 CONTEXT_ELEMENT_UNDEFINED(0),
495 CONTEXT_ELEMENT_UNDEFINED(0),
496 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(4),
497 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(8),
498 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(12),
499 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(16),
500 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(20),
501 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(24),
502 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(28),
503 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(32),
504 CONTEXT_ELEMENT_UNDEFINED(0),
505 CONTEXT_ELEMENT_RESCHED_SP_PLUS(40),
506 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
507 CONTEXT_ELEMENT_RESCHED_SP_OFFSET(36),
508 CONTEXT_ELEMENT_UNDEFINED(ESvcMode),
509 CONTEXT_ELEMENT_UNDEFINED(0),
512 // Table used to retrieve a thread's kernel side context at the point where
513 // an interrupt taken in supervisor mode returns.
514 // Used for kernel threads.
515 const TArmContextElement ContextTableKernel3[] =
517 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR0),
518 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR1),
519 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR2),
520 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR3),
521 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR4),
522 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR5),
523 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR6),
524 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR7),
525 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR8),
526 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR9),
527 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR10),
528 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR11),
529 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR12),
530 CONTEXT_ELEMENT_RESCHED_SP_PLUS((sizeof(SThreadExcStack)+8)),
531 CONTEXT_ELEMENT_RESCHED_IRQ(iR14svc),
532 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iR15),
533 CONTEXT_ELEMENT_RESCHED_IRQ(iX.iCPSR),
534 CONTEXT_ELEMENT_UNDEFINED(0),
537 // Table used to retrieve a thread's kernel side context at the point where
538 // Exec::WaitForAnyRequest() returns.
539 // Used for kernel threads.
540 const TArmContextElement ContextTableKernel4[] =
542 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR0),
543 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR1),
544 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR2),
545 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR3),
546 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR4),
547 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR5),
548 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR6),
549 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR7),
550 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR8),
551 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR9),
552 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR10),
553 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR11),
554 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR12),
555 CONTEXT_ELEMENT_RESCHED_SP_PLUS(sizeof(SThreadExcStack)),
556 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR15),
557 CONTEXT_ELEMENT_RESCHED_INIT(iX.iR15),
558 CONTEXT_ELEMENT_RESCHED_INIT(iX.iCPSR),
559 CONTEXT_ELEMENT_UNDEFINED(0),
562 const TArmContextElement* const ThreadUserContextTables[] =
564 ContextTableUndefined, // EContextNone
565 ContextTableException, // EContextException
566 ContextTableUndefined, // EContextUndefined
567 ContextTableUserInterrupt, // EContextUserInterrupt
568 ContextTableUndefined, // EContextUserInterruptDied (not used)
569 ContextTableSvsrInterrupt1, // EContextSvsrInterrupt1
570 ContextTableUndefined, // EContextSvsrInterrupt1Died (not used)
571 ContextTableUndefined, // EContextSvsrInterrupt2 (not used)
572 ContextTableUndefined, // EContextSvsrInterrupt2Died (not used)
573 ContextTableWFAR, // EContextWFAR
574 ContextTableUndefined, // EContextWFARDied (not used)
575 ContextTableExec, // EContextExec
576 ContextTableKernel, // EContextKernel
577 ContextTableKernel1, // EContextKernel1
578 ContextTableKernel2, // EContextKernel2
579 ContextTableKernel3, // EContextKernel3
580 ContextTableKernel4, // EContextKernel4
584 /** Return table of pointers to user context tables.
586 Each user context table is an array of TArmContextElement objects, one per
587 ARM CPU register, in the order defined in TArmRegisters.
589 The master table contains pointers to the user context tables in the order
590 defined in TUserContextType. There are as many user context tables as
591 scenarios that can lead a user thread to switch to privileged mode.
593 Stop-mode debug agents should use this function to store the address of the
594 master table at a location known to the host debugger. Run-mode debug
595 agents are advised to use NKern::GetUserContext() and
596 NKern::SetUserContext() instead.
598 @return A pointer to the master table. The master table is NULL
599 terminated. The master and user context tables are guaranteed to remain at
600 the same location for the lifetime of the OS execution, so it is safe to
601 cache the returned address.
604 @see TArmContextElement
606 @see TUserContextType
607 @see NKern::SetUserContext
608 @see NKern::GetUserContext
612 EXPORT_C const TArmContextElement* const* NThread::UserContextTables()
614 return &ThreadUserContextTables[0];
618 /** Get a value which indicates where a thread's user mode context is stored.
620 @return A value that can be used as an index into the tables returned by
621 NThread::UserContextTables().
627 @see UserContextTables
630 EXPORT_C NThread::TUserContextType NThread::UserContextType()
632 CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED,"NThread::UserContextType");
635 The SMP nanokernel always saves R0-R12,R13usr,R14usr,ExcCode,PC,CPSR on any
636 entry to the kernel, so getting the user context is always the same.
637 The only possible problem is an FIQ occurring immediately after any other
638 exception, before the registers have been saved. In this case the registers
639 saved by the FIQ will be the ones observed and they will be correct except
640 that the CPSR value will indicate a mode other than USR, which can be used
641 to detect the condition.
643 return EContextException;
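/* Illustrative sketch: a caller holding a saved CPSR value can detect the
   FIQ-after-exception case described above by checking the mode bits, just as
   NThread::GetUserContext() does further down ('savedCpsr' is assumed to have
   been captured from the exception frame):

     TBool wasUserMode = (savedCpsr & 0x1f) == 0x10;   // 0x10 == USR mode
     if (!wasUserMode)
         {
         // registers are valid but the CPSR reflects an exception mode,
         // so report the flags as unavailable
         }
*/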
647 // Enter and return with kernel locked
648 void NThread::GetUserContext(TArmRegSet& aContext, TUint32& aAvailRegistersMask)
650 NThread* pC = NCurrentThreadL();
651 TSubScheduler* ss = 0;
655 if (iWaitState.ThreadIsDead())
658 aAvailRegistersMask = 0;
661 if (iReady && iParent->iReady)
663 ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
664 ss->iReadyListLock.LockOnly();
668 // thread is actually running on another CPU
669 // interrupt that CPU and wait for it to enter interrupt mode
670 // this allows a snapshot of the thread user state to be observed
671 // and ensures the thread cannot return to user mode
672 send_resched_ipi_and_wait(iLastCpu);
675 SThreadExcStack* txs = (SThreadExcStack*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
677 if (txs->iExcCode <= SThreadExcStack::EInit) // if not, thread never entered user mode
679 aContext.iR0 = txs->iR0;
680 aContext.iR1 = txs->iR1;
681 aContext.iR2 = txs->iR2;
682 aContext.iR3 = txs->iR3;
683 aContext.iR4 = txs->iR4;
684 aContext.iR5 = txs->iR5;
685 aContext.iR6 = txs->iR6;
686 aContext.iR7 = txs->iR7;
687 aContext.iR8 = txs->iR8;
688 aContext.iR9 = txs->iR9;
689 aContext.iR10 = txs->iR10;
690 aContext.iR11 = txs->iR11;
691 aContext.iR12 = txs->iR12;
692 aContext.iR13 = txs->iR13usr;
693 aContext.iR14 = txs->iR14usr;
694 aContext.iR15 = txs->iR15;
695 aContext.iFlags = txs->iCPSR;
696 if ((aContext.iFlags & 0x1f) == 0x10)
697 aAvailRegistersMask = 0x1ffffu; // R0-R15,CPSR all valid
700 aContext.iFlags = 0x10; // account for FIQ in SVC case
701 aAvailRegistersMask = 0x0ffffu; // CPSR not valid
707 ss->iReadyListLock.UnlockOnly();
712 class TGetContextIPI : public TGenericIPI
715 void Get(TInt aCpu, TArmRegSet& aContext, TUint32& aAvailRegistersMask);
716 static void Isr(TGenericIPI*);
718 TArmRegSet* iContext;
719 TUint32* iAvailRegsMask;
722 extern "C" TLinAddr get_sp_svc();
723 extern "C" TLinAddr get_lr_svc();
724 extern "C" TInt get_kernel_context_type(TLinAddr /*aReschedReturn*/);
726 void TGetContextIPI::Isr(TGenericIPI* aPtr)
728 TGetContextIPI& ipi = *(TGetContextIPI*)aPtr;
729 TArmRegSet& a = *ipi.iContext;
730 SThreadExcStack* txs = (SThreadExcStack*)get_sp_svc();
744 a.iR13 = TUint32(txs) + sizeof(SThreadExcStack);
745 a.iR14 = get_lr_svc();
747 a.iFlags = txs->iCPSR;
748 *ipi.iAvailRegsMask = 0x1ffffu;
751 void TGetContextIPI::Get(TInt aCpu, TArmRegSet& aContext, TUint32& aAvailRegsMask)
753 iContext = &aContext;
754 iAvailRegsMask = &aAvailRegsMask;
755 Queue(&Isr, 1u<<aCpu);
759 void GetRegs(TArmRegSet& aContext, TLinAddr aStart, TUint32 aMask)
761 TUint32* d = (TUint32*)&aContext;
762 const TUint32* s = (const TUint32*)aStart;
763 for (; aMask; aMask>>=1, ++d)
    { if (aMask & 1) *d = *s++; } // the source block is packed, so advance it only for registers present in the mask
770 // Enter and return with kernel locked
771 void NThread::GetSystemContext(TArmRegSet& aContext, TUint32& aAvailRegsMask)
774 NThread* pC = NCurrentThreadL();
775 __NK_ASSERT_ALWAYS(pC!=this);
776 TSubScheduler* ss = 0;
778 if (iWaitState.ThreadIsDead())
783 if (iReady && iParent->iReady)
785 ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
786 ss->iReadyListLock.LockOnly();
790 // thread is actually running on another CPU
791 // use an interprocessor interrupt to get a snapshot of the state
793 ipi.Get(iLastCpu, aContext, aAvailRegsMask);
797 // thread is not running and can't start
798 SThreadReschedStack* trs = (SThreadReschedStack*)iSavedSP;
799 TInt kct = get_kernel_context_type(trs->iR15);
800 __NK_ASSERT_ALWAYS(kct>=0); // couldn't match return address from reschedule
801 TLinAddr sp = trs->iSPRschdFlg &~ 3;
804 case 0: // thread not yet started
805 case 5: // Exec::WaitForAnyRequest()
806 GetRegs(aContext, sp, 0x01fffu);
807 aContext.iR13 = sp + sizeof(SThreadExcStack);
808 GetRegs(aContext, sp+64, 0x18000u);
809 aAvailRegsMask =0x1bfffu;
812 case 2: // preemption point
813 case 3: // NKern::WaitForAnyRequest() or NKern::FSWait()
814 GetRegs(aContext, sp+4, 0x08ff0u);
815 aContext.iR14 = aContext.iR15;
816 aContext.iR13 = sp+40;
817 aAvailRegsMask =0x0eff0u;
820 GetRegs(aContext, sp+4, 0x04000u);
821 GetRegs(aContext, sp+8, 0x01fffu);
822 GetRegs(aContext, sp+64, 0x18000u);
823 aContext.iR13 = sp + sizeof(SThreadExcStack) + 8;
824 aAvailRegsMask =0x1ffffu;
827 __NK_ASSERT_ALWAYS(0);
831 ss->iReadyListLock.UnlockOnly();
835 // Enter and return with kernel locked
836 void NThread::SetUserContext(const TArmRegSet& aContext, TUint32& aRegMask)
838 NThread* pC = NCurrentThreadL();
839 TSubScheduler* ss = 0;
843 if (iWaitState.ThreadIsDead())
849 if (iReady && iParent->iReady)
851 ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
852 ss->iReadyListLock.LockOnly();
856 // thread is actually running on another CPU
857 // interrupt that CPU and wait for it to enter interrupt mode
858 // this allows a snapshot of the thread user state to be observed
859 // and ensures the thread cannot return to user mode
860 send_resched_ipi_and_wait(iLastCpu);
863 SThreadExcStack* txs = (SThreadExcStack*)(TLinAddr(iStackBase) + TLinAddr(iStackSize));
865 aRegMask &= 0x1ffffu;
866 if (txs->iExcCode <= SThreadExcStack::EInit) // if not, thread never entered user mode
868 if (aRegMask & 0x0001u)
869 txs->iR0 = aContext.iR0;
870 if (aRegMask & 0x0002u)
871 txs->iR1 = aContext.iR1;
872 if (aRegMask & 0x0004u)
873 txs->iR2 = aContext.iR2;
874 if (aRegMask & 0x0008u)
875 txs->iR3 = aContext.iR3;
876 if (aRegMask & 0x0010u)
877 txs->iR4 = aContext.iR4;
878 if (aRegMask & 0x0020u)
879 txs->iR5 = aContext.iR5;
880 if (aRegMask & 0x0040u)
881 txs->iR6 = aContext.iR6;
882 if (aRegMask & 0x0080u)
883 txs->iR7 = aContext.iR7;
884 if (aRegMask & 0x0100u)
885 txs->iR8 = aContext.iR8;
886 if (aRegMask & 0x0200u)
887 txs->iR9 = aContext.iR9;
888 if (aRegMask & 0x0400u)
889 txs->iR10 = aContext.iR10;
890 if (aRegMask & 0x0800u)
891 txs->iR11 = aContext.iR11;
892 if (aRegMask & 0x1000u)
893 txs->iR12 = aContext.iR12;
894 if (aRegMask & 0x2000u)
895 txs->iR13usr = aContext.iR13;
896 if (aRegMask & 0x4000u)
897 txs->iR14usr = aContext.iR14;
898 if (aRegMask & 0x8000u)
899 txs->iR15 = aContext.iR15;
900 // Assert that target thread is in USR mode, and update only the flags part of the PSR
901 __NK_ASSERT_ALWAYS((txs->iCPSR & 0x1f) == 0x10);
902 if (aRegMask & 0x10000u)
904 // NZCVQ.......GE3-0................
905 const TUint32 writableFlags = 0xF80F0000;
906 txs->iCPSR &= ~writableFlags;
907 txs->iCPSR |= aContext.iFlags & writableFlags;
915 ss->iReadyListLock.UnlockOnly();
920 /** Get (subset of) user context of specified thread.
922 The nanokernel does not systematically save all registers in the supervisor
923 stack on entry into privileged mode and the exact subset depends on why the
924 switch to privileged mode occurred. So in general only a subset of the
925 register set is available.
927 @param aThread Thread to inspect. It can be the current thread or a non-current one.
930 @param aContext Pointer to TArmRegSet structure where the context is copied.
933 @param aAvailRegistersMask Bit mask telling which subset of the context is
934 available and has been copied to aContext (1: register available / 0: not
935 available). Bit 0 stands for register R0.
938 @see ThreadSetUserContext
940 @pre Call in a thread context.
941 @pre Interrupts must be enabled.
943 EXPORT_C void NKern::ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
945 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetUserContext");
946 TArmRegSet& a = *(TArmRegSet*)aContext;
947 memclr(aContext, sizeof(TArmRegSet));
949 aThread->GetUserContext(a, aAvailRegistersMask);
953 /** Get (subset of) system context of specified thread.
955 @param aThread Thread to inspect. It can be the current thread or a non-current one.
958 @param aContext Pointer to TArmRegSet structure where the context is copied.
961 @param aAvailRegistersMask Bit mask telling which subset of the context is
962 available and has been copied to aContext (1: register available / 0: not
963 available). Bit 0 stands for register R0.
966 @see ThreadSetUserContext
968 @pre Call in a thread context.
969 @pre Interrupts must be enabled.
971 EXPORT_C void NKern::ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask)
973 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadGetSystemContext");
974 TArmRegSet& a = *(TArmRegSet*)aContext;
975 memclr(aContext, sizeof(TArmRegSet));
977 aThread->GetSystemContext(a, aAvailRegistersMask);
981 /** Set (subset of) user context of specified thread.
983 @param aThread Thread to modify. It can be the current thread or a non-current one.
986 @param aContext Pointer to TArmRegSet structure containing the context
987 to set. The values of registers which aren't part of the context saved
988 on the supervisor stack are ignored.
991 @see ThreadGetUserContext
993 @pre Call in a thread context.
994 @pre Interrupts must be enabled.
996 EXPORT_C void NKern::ThreadSetUserContext(NThread* aThread, TAny* aContext)
998 CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"NKern::ThreadSetUserContext");
999 TArmRegSet& a = *(TArmRegSet*)aContext;
1000 TUint32 mask = 0x1ffffu;
1002 aThread->SetUserContext(a, mask);
1007 #ifdef __CPU_HAS_VFP
1008 extern void VfpContextSave(void*);
1010 /** Complete the saving of a thread's context
1012 This saves the VFP/NEON registers if necessary once we know that we are definitely going to switch away from this thread.
1017 void NThread::CompleteContextSave()
1019 #ifdef __CPU_HAS_VFP
1020 if (Arm::VfpThread[NKern::CurrentCpu()] == this)
1022 VfpContextSave(iExtraContext); // Disables VFP
1028 extern "C" TInt HandleSpecialOpcode(TArmExcInfo* aContext, TInt aType)
1030 TUint32 cpsr = aContext->iCpsr;
1031 TUint32 mode = cpsr & 0x1f;
1032 TUint32 opcode = aContext->iFaultStatus;
1034 // Coprocessor abort from CP15 or E7FFDEFF -> crash immediately
1035 if ( (aType==15 && opcode!=0xee000f20)
1036 || (aType==32 && opcode==0xe7ffdeff)
1037 || (aType==33 && opcode==0xdeff)
1041 ExcFault(aContext); // crash instruction in privileged mode
1042 return 0; // crash instruction in user mode - handle normally
1044 if ( (aType==15 && opcode==0xee000f20)
1045 || (aType==32 && opcode==0xe7ffdefc)
1046 || (aType==33 && opcode==0xdefc)
1050 __KTRACE_OPT(KPANIC,DumpExcInfo(*aContext));
1052 aContext->iR15 += 4;
1054 aContext->iR15 += 2;
1060 /** Return the total CPU time so far used by the specified thread.
1062 @return The total CPU time in units of 1/NKern::CpuTimeMeasFreq().
1064 EXPORT_C TUint64 NKern::ThreadCpuTime(NThread* aThread)
1066 TSubScheduler* ss = 0;
1068 aThread->AcqSLock();
1069 if (aThread->i_NThread_Initial)
1070 ss = &TheSubSchedulers[aThread->iLastCpu];
1071 else if (aThread->iReady && aThread->iParent->iReady)
1072 ss = &TheSubSchedulers[aThread->iParent->iReady & NSchedulable::EReadyCpuMask];
1074 ss->iReadyListLock.LockOnly();
1075 TUint64 t = aThread->iTotalCpuTime64;
1076 if (aThread->iCurrent || (aThread->i_NThread_Initial && !ss->iCurrentThread))
1077 t += (NKern::Timestamp() - ss->iLastTimestamp64);
1079 ss->iReadyListLock.UnlockOnly();
1080 aThread->RelSLock();
1085 TInt NKern::QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback)
1087 __e32_memory_barrier();
1088 if (aCallback->iNext != KUserModeCallbackUnqueued)
1090 TInt result = KErrDied;
1092 TUserModeCallback* listHead = aThread->iUserModeCallbacks;
1094 if (TLinAddr(listHead) & 3)
1095 goto done; // thread exiting
1096 aCallback->iNext = listHead;
1097 } while (!__e32_atomic_cas_ord_ptr(&aThread->iUserModeCallbacks, &listHead, aCallback));
1100 if (!listHead) // if this isn't the first callback, someone else will have done this bit
1103 * If aThread is currently running on another CPU we need to send an IPI so
1104 * that it will enter kernel mode and run the callback.
1105 * The synchronization is tricky here. We want to check if the thread is
1106 * running and if so on which core. We need to avoid any possibility of
1107 * the thread entering user mode without having seen the callback,
1108 * either because we thought it wasn't running so didn't send an IPI or
1109 * because the thread migrated after we looked and we sent the IPI to
1110 * the wrong processor. Sending a redundant IPI is not a problem (e.g.
1111 * because the thread is running in kernel mode - which we can't tell -
1112 * or because the thread stopped running after we looked)
1113 * The following events are significant:
1114 * Event A: Target thread writes to iCurrent when it starts running
1114 * Event B: Target thread reads iUserModeCallbacks before entering user mode
1117 * Event C: This thread writes to iUserModeCallbacks
1118 * Event D: This thread reads iCurrent to check if aThread is running
1119 * There is a DMB and DSB between A and B since A occurs with the ready
1120 * list lock for the CPU involved or the thread lock for aThread held
1121 * and this lock is released before B occurs.
1122 * There is a DMB between C and D (part of __e32_atomic_cas_ord_ptr).
1123 * Any observer which observes B must also have observed A.
1124 * Any observer which observes D must also have observed C.
1125 * If aThread observes B before C (i.e. enters user mode without running
1126 * the callback) it must observe A before C and so it must also observe
1127 * A before D (i.e. D reads the correct value for iCurrent).
1129 TInt current = aThread->iCurrent;
1132 TInt cpu = current & NSchedulable::EReadyCpuMask;
1133 if (cpu != NKern::CurrentCpu())
1134 send_resched_ipi(cpu);