Update contrib.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\nkernsmp\arm\ncsched.cia

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__
#define __INCLUDE_TDFC_DEFINES__

//#include <highrestimer.h>
//#include "emievents.h"
#ifdef _DEBUG
#define ASM_KILL_LINK(rp,rs)	asm("mov "#rs", #0xdf ");\
	asm("orr "#rs", "#rs", "#rs", lsl #8 ");\
	asm("orr "#rs", "#rs", "#rs", lsl #16 ");\
	asm("str "#rs", ["#rp"] ");\
	asm("str "#rs", ["#rp", #4] ");
#else
#define ASM_KILL_LINK(rp,rs)
#endif
#define ALIGN_STACK_START \
	asm("mov r12, sp"); \
	asm("tst sp, #4"); \
	asm("subeq sp, sp, #4"); \
	asm("str r12, [sp,#-4]!")

#define ALIGN_STACK_END \
	asm("ldr sp, [sp]")
//#define __DEBUG_BAD_ADDR

extern "C" void NewThreadTrace(NThread* a);
extern "C" void send_accumulated_resched_ipis();
__NAKED__ void TScheduler::Reschedule()
// Enter in mode_svc with kernel locked, interrupts can be on or off
// Exit in mode_svc with kernel unlocked, interrupts off
// NOTE: STACK ALIGNMENT UNKNOWN ON ENTRY
// NOTE: R4-R11 are modified
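/*
Illustrative outline only (not generated code): the assembler below behaves roughly
like the following C-style sketch. SaveCurrentContext()/RestoreContext() are
placeholder names for the register save/restore sequences coded inline below.

	TSubScheduler& ss = SubScheduler();
	FOREVER
		{
		if (ss.iDfcPendingFlag || ss.iExIDfcPendingFlag)
			ss.QueueDfcs();                      // run any pending IDFCs
		if (!ss.iRescheduleNeededFlag)
			break;                               // nothing to do - return with kernel unlocked
		SaveCurrentContext();                    // aux registers + SP saved on the old thread's stack
		NThreadBase* t = ss.SelectNextThread();  // may return NULL -> idle/no_thread path
		if (t)
			RestoreContext(t);                   // switch stack, address space, aux registers
		// loop in case more IDFCs/reschedules became pending meanwhile
		}
*/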
67 asm("mov r2, sp "); // bit 0 will be reschedule flag
68 asm("bic sp, sp, #4 "); // align stack
69 GET_RWNO_TID(,r0) // r0->TSubScheduler
70 asm("stmfd sp!, {r2,lr} "); // save original SP/resched flag, return address
71 __ASM_CLI(); // interrupts off
72 asm("ldr r1, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3)); // check iDfcPendingFlag and iExIDfcPendingFlag
73 asm("mov r11, r0 "); // r11->TSubScheduler
74 asm("ldr r10, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr)); // r10->CPU local timer
76 asm("start_resched: ");
77 asm("movs r1, r1, lsr #16 "); // check if IDFCs or ExIDFCs pending
79 asm("blne " CSM_ZN13TSubScheduler9QueueDfcsEv); // queue any pending DFCs
80 asm("ldrb r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
81 asm("ldr r3, [sp, #0] ");
82 asm("mrs r2, spsr "); // r2=spsr_svc
83 asm("cmp r1, #0 "); // check if a reschedule is required
84 asm("beq no_resched_needed "); // branch out if not
85 __ASM_STI(); // interrupts back on
86 asm("orr r3, r3, #1 ");
87 asm("str r3, [sp, #0] "); // set resched flag
88 asm("stmfd sp!, {r0,r2} "); // store SPSR_SVC
89 asm("ldr r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
91 asm("mrc p14, 6, r7, c1, c0, 0 "); // r7 = TEEHBR
95 GET_RWRO_TID(,r8); // r8 = User RO Thread ID
96 GET_RWRW_TID(,r9); // r9 = User RW Thread ID
98 VFP_FMRX(,0,VFP_XREG_FPEXC); // r0 = FPEXC
99 asm("bic r0, r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Store FPEXC with VFP disabled in case this thread runs on a different core next time
103 GET_CAR(, r1); // r1 = CAR
104 asm("mrc p15, 0, r12, c3, c0, 0 "); // r12 = DACR
105 asm("ldr r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
107 // Save auxiliary registers
108 // R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
109 asm("sub sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare));
110 asm("str sp, [r5, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // store original thread's stack pointer
111 asm("stmia sp, {r0-r1,r7-r9,r12} ");
113 // We must move to a temporary stack before selecting the next thread.
114 // This is because another CPU may begin executing this thread before the
115 // select_next_thread() function returns and our stack would then be
116 // corrupted. We use the stack belonging to this CPU's initial thread since
117 // we are guaranteed that will never run on another CPU.
118 asm("ldr sp, [r4, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
120 asm("select_thread: ");
122 asm("bl " CSM_ZN13TSubScheduler16SelectNextThreadEv ); // also sets r0->iCurrentThread
#ifdef BTRACE_CPU_USAGE
asm("ldr r2, __BTraceFilter ");
#endif
asm("movs r3, r0 "); // r3 = new thread (might be 0)
127 asm("ldrne sp, [r0, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); // if a thread has been selected, move to its stack
128 asm("beq no_thread "); // branch out if no thread is ready
130 #ifdef BTRACE_CPU_USAGE
131 asm("ldrb r1, [r2, #4] "); // check category 4 trace
134 asm("stmfd sp!, {r0-r3} ");
135 asm("bl NewThreadTrace ");
136 asm("ldmfd sp!, {r0-r3} ");
138 #endif // BTRACE_CPU_USAGE
140 asm("cmp r3, r5 "); // same thread?
141 asm("beq same_thread ");
142 asm("ldrb r1, [r3, #%a0]" : : "i" _FOFF(NThreadBase, i_ThrdAttr));
143 asm("ldr r4, [r11, #%a0]" : : "i" _FOFF(TSubScheduler, iScheduler));
144 asm("mov r2, r3, lsr #6 "); // r2=current thread>>6
145 asm("tst r1, #%a0" : : "i" ((TInt)KThreadAttAddressSpace)); // address space required?
146 asm("ldrne r4, [r4, #%a0]" : : "i" _FOFF(TScheduler,iProcessHandler)); // if so, get pointer to process handler
148 // we are doing a thread switch so restore new thread's auxiliary registers
149 // R0=FPEXC, R1=CAR, R7=TEEHBR, R8=RWROTID, R9=RWRWTID, R12=DACR
150 asm("ldmia sp, {r0-r1,r7-r9,r12} ");
153 asm("mcr p14, 6, r7, c1, c0, 0 "); // r7 = TEEHBR
155 SET_RWRO_TID(,r8); // r8 = User RO Thread ID
156 SET_RWRW_TID(,r9); // r9 = User RW Thread ID
158 VFP_FMXR(,VFP_XREG_FPEXC,0); // r0 = FPEXC
160 SET_CAR(, r1); // r1 = CAR
161 asm("mcr p15, 0, r12, c3, c0, 0 "); // r12 = DACR
163 asm("beq no_as_switch "); // skip if address space change not required
165 // Do address space switching
166 // Handler called with:
167 // r11->subscheduler, r3->current thread
168 // r9->new address space, r5->old address space
169 // Must preserve r10,r11,r3, can modify other registers
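/*
Illustrative only: expressed as a C-style prototype the contract above is roughly

	// hypothetical signature - the real hook is reached through TScheduler::iProcessHandler
	// using the register assignments listed above, not an AAPCS function call
	void ProcessHandler(TAny* aNewAddressSpace,    // r9
	                    TAny* aOldAddressSpace,    // r5
	                    NThreadBase* aThread,      // r3  (must be preserved)
	                    TSubScheduler* aSS);       // r11 (must be preserved)
*/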
170 asm("ldr r5, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iAddressSpace)); // get current address space ptr
171 asm("ldr r9, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iAddressSpace)); // get new address space ptr
172 asm("adr lr, as_switch_return ");
175 asm("no_as_switch: ");
176 asm("mrc p15, 0, r4, c13, c0, 1 "); // r4 = CONTEXTID (threadID:ASID)
177 asm("and r4, r4, #0xff "); // isolate ASID
178 asm("orr r2, r4, r2, lsl #8 "); // r2 = new ContextID (new thread ID : ASID)
179 __DATA_SYNC_BARRIER_Z__(r12); // needed before change to ContextID
180 asm("mcr p15, 0, r2, c13, c0, 1 "); // set ContextID (ASID + debugging thread ID)
181 __INST_SYNC_BARRIER__(r12);
#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
asm("mcr p15, 0, r12, c7, c5, 6 "); // flush BTAC
#endif

asm("as_switch_return: ");
187 asm("same_thread: ");
188 asm("add sp, sp, #%a0" : : "i" _FOFF(SThreadReschedStack,iSpare)); // step past auxiliary registers
189 asm("ldmib sp!, {r2,r12} "); // r2=SPSR_SVC, r12=original SP + resched flag
190 __ASM_CLI(); // interrupts off
191 asm("ldr r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
192 asm("msr spsr, r2 "); // restore spsr_svc
194 asm("mov r2, r12 "); // r2 = original SP + reschedule flag
195 asm("cmp r1, #0 "); // check for more IDFCs and/or another reschedule
196 asm("bne start_resched "); // loop if required
197 asm("ldr r14, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
198 asm("ldr r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
199 asm("str r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
200 asm("cmp r14, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
201 asm("ldr lr, [sp, #4] "); // restore R10, R11, return address
202 asm("bic sp, r2, #3 "); // restore initial unaligned stack pointer
203 asm("and r2, r2, #1 "); // r2 = reschedule flag
204 asm("beq resched_thread_divert ");
206 // Return with: R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
210 asm("no_resched_needed: ");
211 asm("ldr r3, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
212 asm("ldr r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
214 asm("str r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
215 asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(NThreadBase,iCsFunction));
216 asm("cmp r2, #%a0" : : "i" ((TInt)NThreadBase::ECSDivertPending));
217 asm("ldmfd sp, {r2,lr} "); // r2 = original SP + reschedule flag, restore lr
218 asm("bic sp, r2, #3 "); // restore initial unaligned stack pointer
219 asm("and r2, r2, #1 "); // r2 = reschedule flag
220 asm("beq resched_thread_divert ");
222 // Return with: R0=&SubScheduler, R1=0, R2=TRUE if reschedule occurred, R3=iCurrentThread
226 asm("resched_thread_divert: ");
228 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
229 asm("bic sp, sp, #4 "); // align stack
230 asm("stmfd sp!, {r0-r5,r12,lr} "); // save registers for diagnostic purposes
231 asm("mov r4, r3 "); // don't really need to bother about registers since thread is exiting
233 // need to send any outstanding reschedule IPIs
235 asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
238 asm("ldrb r1, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
244 asm("strb r2, [r4, #%a0]" : : "i" _FOFF(NThreadBase,iFastMutexDefer));
246 asm("bl " CSM_ZN11NThreadBase4ExitEv );
247 __ASM_CRASH(); // shouldn't get here
249 // There is no thread ready to run
250 // R11->TSubScheduler, R1=unknown, R2=0, R3=__BTraceFilter, R12=unknown
253 asm("ldr r12, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
256 asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
258 __DATA_SYNC_BARRIER_Z__(r1);
261 asm("ldr r1, [r11, #%a0]" : : "i" (_FOFF(TSubScheduler,iDfcPendingFlag)&~3)); // check iDfcPendingFlag and iExIDfcPendingFlag
263 asm("movs r1, r1, lsr #16 ");
264 asm("beq no_thread ");
265 asm("bl " CSM_ZN13TSubScheduler9QueueDfcsEv); // queue any pending DFCs
266 asm("ldrb r1, [r11, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
267 asm("cmp r1, #0 "); // check if a reschedule is required
268 asm("beq no_thread2 ");
269 asm("b select_thread ");
273 /******************************************************************************
276 __CPU_ARM1136_ERRATUM_351912_FIXED
277 Debug hooks in the scheduler
278 ******************************************************************************/
280 asm("__BTraceFilter: ");
281 asm(".word %a0 " : : "i" ((TInt)&BTraceData.iFilter[0]));
/**
* Returns the range of linear memory which inserting the scheduler hooks needs to modify.

* @param aStart Set to the lowest memory address which needs to be modified.
* @param aEnd Set to the highest memory address +1 which needs to be modified.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd)
ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
asm("adr r2,resched_trampoline_hook_address");
asm("adr r2,resched_trampoline_hook_address+4");
/**
* Modifies the scheduler code so that it can call the function set by
* NKern::SetRescheduleCallback().

* This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::InsertSchedulerHooks()
ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
asm("adr r0,resched_trampoline_hook_address");
asm("adr r1,resched_trampoline");
asm("sub r1, r1, r0");
asm("sub r1, r1, #8");
asm("mov r1, r1, asr #2");
asm("add r1, r1, #0xea000000"); // r1 = a branch instruction from resched_trampoline_hook_address to resched_trampoline

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
// These platforms have shadow memory in non-writable pages. We cannot use the standard
// Epoc::CopyToShadowMemory interface as we hold the kernel lock here.
// Instead, we'll temporarily disable access permission checking in the MMU by switching
// domain#0 into Manager Mode (see Domain Access Control Register).
asm("mrs r12, CPSR "); // save cpsr setting and ...
CPSIDAIF; // ...disable interrupts
asm("mrc p15, 0, r2, c3, c0, 0 "); // read DACR
asm("orr r3, r2, #3"); // domain #0 is the first two bits. manager mode is 11b
asm("mcr p15, 0, r3, c3, c0, 0 "); // write DACR
asm("mcr p15, 0, r2, c3, c0, 0 "); // write back the original value of DACR
asm("msr CPSR_cxsf, r12 "); // restore cpsr setting (re-enable interrupts)
/**
* Reverts the modification of the scheduler code performed by NKern::InsertSchedulerHooks().

* This requires that the region of memory indicated by NKern::SchedulerHooks() is writable.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::RemoveSchedulerHooks()
ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
asm("adr r0,resched_trampoline_hook_address");
asm("ldr r1,resched_trampoline_unhook_data");

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
// See comments above in InsertSchedulerHooks
asm("mrs r12, CPSR "); // save cpsr setting and ...
CPSIDAIF; // ...disable interrupts
asm("mrc p15, 0, r2, c3, c0, 0 "); // read DACR
asm("orr r3, r2, #3"); // domain #0 is the first two bits. manager mode is 11b
asm("mcr p15, 0, r3, c3, c0, 0 "); // write DACR
asm("mcr p15, 0, r2, c3, c0, 0 "); // write back the original value of DACR
asm("msr CPSR_cxsf, r12 "); // restore cpsr setting (re-enable interrupts)
/**
* Set the function which is to be called on every thread reschedule.

* @param aCallback Pointer to callback function, or NULL to disable callback.

@pre Kernel must be locked.
@pre Call in a thread context.
@pre Interrupts must be enabled.
*/
EXPORT_C __NAKED__ void NKern::SetRescheduleCallback(TRescheduleCallback /*aCallback*/)
ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_LOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
#ifdef __DEBUGGER_SUPPORT__
asm("ldr r1, __TheScheduler ");
asm("str r0, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleHook));
/** Disables interrupts to specified level.

Note that if we are not disabling all interrupts, we must lock the kernel
here; otherwise a high priority interrupt which is still enabled could
cause a reschedule and the new thread could then reenable interrupts.

@param aLevel Interrupts are disabled up to and including aLevel. On ARM,
level 1 stands for IRQ only and level 2 stands for IRQ and FIQ.
@return CPU-specific value passed to RestoreInterrupts.

@pre 1 <= aLevel <= maximum level (CPU-specific)

@see NKern::RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableInterrupts(TInt /*aLevel*/)
#ifdef __FIQ_IS_UNCONTROLLED__
asm("mrs r1, cpsr ");
asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* return original CPSR I and F bits */
asm("bhi " CSM_ZN5NKern20DisableAllInterruptsEv); // if level>1, disable all
asm("mrs r2, cpsr "); // r2=original CPSR
asm("bcc 1f "); // skip if level=0
__ASM_CLI(); // Disable all interrupts to prevent migration
GET_RWNO_TID(,r12); // r12 -> TSubScheduler
asm("ldr r3, [r12, #%a0]!" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("and r0, r2, #0xc0 ");
asm("cmp r3, #0 "); // test if kernel locked
asm("addeq r3, r3, #1 "); // if not, lock the kernel
asm("streq r3, [r12] ");
asm("orreq r0, r0, #0x80000000 "); // and set top bit to indicate kernel locked
__ASM_STI2(); // reenable FIQs only
asm("and r0, r2, #0xc0 ");
/** Disables all interrupts (e.g. both IRQ and FIQ on ARM).

@return CPU-specific value passed to NKern::RestoreInterrupts().

@see NKern::RestoreInterrupts()
*/
EXPORT_C __NAKED__ TInt NKern::DisableAllInterrupts()
asm("mrs r1, cpsr ");
asm("and r0, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask)); /* return original CPSR I and F bits */
/** Enables all interrupts (e.g. IRQ and FIQ on ARM).

This function never unlocks the kernel. So it must be used
only to complement NKern::DisableAllInterrupts. Never use it
to complement NKern::DisableInterrupts.

@see NKern::DisableInterrupts()
@see NKern::DisableAllInterrupts()
*/
EXPORT_C __NAKED__ void NKern::EnableAllInterrupts()
/** Restores interrupts to previous level and unlocks the kernel if it was
locked when disabling them.

@param aRestoreData CPU-specific data returned from NKern::DisableInterrupts
or NKern::DisableAllInterrupts specifying the previous interrupt level.

@see NKern::DisableInterrupts()
@see NKern::DisableAllInterrupts()
*/
EXPORT_C __NAKED__ void NKern::RestoreInterrupts(TInt /*aRestoreData*/)
asm("tst r0, r0 "); // test state of top bit of aLevel
asm("mrs r1, cpsr ");
asm("and r0, r0, #%a0" : : "i" ((TInt)KAllInterruptsMask));
asm("bic r1, r1, #%a0" : : "i" ((TInt)KAllInterruptsMask));
asm("orr r1, r1, r0 "); // replace I and F bits with those supplied
asm("msr cpsr_c, r1 "); // flags are unchanged (in particular N)
__JUMP(pl,lr); // if top bit of aLevel clear, finished

// if top bit of aLevel set, fall through to unlock the kernel
/** Unlocks the kernel.

Decrements iKernLockCount for the current CPU; if it becomes zero and IDFCs
or a reschedule are pending, calls the scheduler to process them.
Must be called in mode_svc.

@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ void NKern::Unlock()
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

GET_RWNO_TID(,r0) // r0=&SubScheduler()
__ASM_CLI(); // interrupts off
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
asm("subs r3, r1, #1 ");
asm("strne r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("bne 0f "); // kernel still locked -> return
asm("cmp r2, #0 "); // check for DFCs or reschedule
asm("cmp r12, #0 "); // IPIs outstanding?
asm("str r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount)); // unlock the kernel
__ASM_STI(); // interrupts back on

// need to run IDFCs and/or reschedule
asm("stmfd sp!, {r0,r4-r11,lr} ");
asm("bl " CSM_ZN10TScheduler10RescheduleEv );
asm(".global nkern_unlock_resched_return ");
asm("nkern_unlock_resched_return: ");

// need to send any outstanding reschedule IPIs
asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
asm("ldmfd sp!, {r0,r4-r11,lr} ");

asm("stmfd sp!, {r0,lr} ");
asm("bl " CSM_CFUNC(send_accumulated_resched_ipis));
asm("ldmfd sp!, {r0,lr} ");
/** Locks the kernel.

Increments iKernLockCount for the current CPU, thereby deferring IDFCs
and preemption. Must be called in mode_svc.

@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ void NKern::Lock()
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("add r3, r3, #1 "); // lock the kernel
asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
/** Locks the kernel and returns a pointer to the current thread.

Increments iKernLockCount for the current CPU, thereby deferring IDFCs
and preemption. Must be called in mode_svc.

@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ NThread* NKern::LockC()
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("ldr r0, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("add r3, r3, #1 "); // lock the kernel
asm("str r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
/** Allows IDFCs and rescheduling if they are pending.

If IDFCs or a reschedule are pending and iKernLockCount for the current CPU
is exactly 1, calls the scheduler to process the IDFCs and possibly reschedule.
Must be called in mode_svc.

@return Nonzero if a reschedule actually occurred, zero if not.

@pre Call either in a thread or an IDFC context.
@pre Do not call from an ISR.
*/
EXPORT_C __NAKED__ TInt NKern::PreemptionPoint()
ASM_CHECK_PRECONDITIONS(MASK_NOT_ISR);

GET_RWNO_TID(,r0) // r0=&SubScheduler()
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
asm("bgt 0f "); // if locked more than once return FALSE
asm("cmp r2, #0 "); // locked once and IDFCs/reschedule pending?
asm("bne 1f "); // skip if so
asm("cmp r12, #0 "); // locked once and resched IPIs outstanding?
asm("bne 2f "); // skip if so
__JUMP(, lr); // else return FALSE

// need to run IDFCs and/or reschedule
asm("stmfd sp!, {r1,r4-r11,lr} ");
asm("bl " CSM_ZN10TScheduler10RescheduleEv );
asm(".global nkern_preemption_point_resched_return ");
asm("nkern_preemption_point_resched_return: ");
asm("str r2, [sp] ");
asm("str r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iKernLockCount));

// need to send any outstanding reschedule IPIs
asm("blne " CSM_CFUNC(send_accumulated_resched_ipis));
asm("ldmfd sp!, {r0,r4-r11,lr} "); // return TRUE if reschedule occurred

asm("stmfd sp!, {r2,lr} ");
asm("bl " CSM_CFUNC(send_accumulated_resched_ipis));
asm("ldmfd sp!, {r0,lr} "); // return TRUE if reschedule occurred
// Do the actual VFP context save
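// Buffer layout implied by the stores below (summary only; some entries are
// conditional on the VFP architecture/configuration in the elided directives):
//   [FPSCR] [FPINST, FPINST2 where saved] [D0-D15] [D16-D31 if 32 doubles are present and enabled]
// FPEXC itself is saved by the Reschedule() code above with the EN bit cleared, as noted
// there, in case the thread next runs on a different core.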
__NAKED__ void VfpContextSave(void*)
VFP_FMRX(,1,VFP_XREG_FPEXC);
asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // Check to see if VFP in use
__JUMP(eq, lr); // Return immediately if not
asm("tst r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX) ); // Check to see if an exception has occurred
asm("beq 1f "); // Skip ahead if not
asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX));
VFP_FMXR(,VFP_XREG_FPEXC,1); // Reset exception flag
asm("orr r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EX)); // But store it for later

VFP_FMRX(,2,VFP_XREG_FPSCR);
asm("stmia r0!, {r2} "); // Save FPSCR

VFP_FMRX(,2,VFP_XREG_FPINST);
VFP_FMRX(,3,VFP_XREG_FPINST2);
asm("stmia r0!, {r2-r3} "); // Save FPINST, FPINST2

VFP_FSTMIADW(CC_AL,0,0,16); // Save D0 - D15

VFP_FMRX(,2,VFP_XREG_MVFR0);
asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32)); // Check to see if all 32 Advanced SIMD registers are present
asm("beq 0f "); // Skip ahead if not
asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS)); // Check to see if access to the upper 16 registers is disabled
VFP_FSTMIADW(CC_EQ,0,16,16); // If not then save D16 - D31

asm("bic r1, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
VFP_FMXR(,VFP_XREG_FPEXC,1); // Disable VFP
/** Check if the kernel is locked the specified number of times.

@param aCount The number of times the kernel should be locked.
If zero, tests if it is locked at all.
@return TRUE if the tested condition is true.
*/
EXPORT_C __NAKED__ TBool NKern::KernelLocked(TInt /*aCount*/)
asm("mrs r12, cpsr ");

asm("movs r1, r0 "); // r1 = aCount
asm("ldr r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
asm("moveq r1, r0 "); // if aCount=0, aCount=iKernLockCount
asm("cmp r1, r0 ");
asm("movne r0, #0 "); // if aCount!=iKernLockCount, return FALSE else return iKernLockCount
asm("msr cpsr, r12 ");
// Only call this if thread migration is disabled, i.e.
// interrupts disabled, kernel locked or current thread in 'freeze cpu' mode
extern "C" __NAKED__ TSubScheduler& SubScheduler()
/** Returns the NThread control block for the currently scheduled thread.

Note that this is the calling thread if called from a thread context, or the
interrupted thread if called from an interrupt context.

@return A pointer to the NThread for the currently scheduled thread.

@pre Call in any context.
*/
EXPORT_C __NAKED__ NThread* NKern::CurrentThread()
asm("mrs r12, cpsr ");
asm("ldrne r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
asm("msr cpsr, r12 ");
/** Returns the NThread control block for the currently scheduled thread.

Note that this is the calling thread if called from a thread context, or the
interrupted thread if called from an interrupt context.

@return A pointer to the NThread for the currently scheduled thread.

@pre Call with migration disabled - i.e. from an ISR, IDFC, with interrupts
disabled or with preemption disabled.
*/
780 extern "C" __NAKED__ NThread* NCurrentThreadL()
783 asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCurrentThread));
/** Returns the CPU number of the calling CPU.

@return The CPU number of the calling CPU.

@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentCpu()
asm("mrs r12, cpsr ");
asm("ldrne r0, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iCpuNum));
asm("msr cpsr, r12 ");
/** Returns the current processor context type (thread, IDFC or interrupt).

@return A value from NKern::TContext enumeration (but never EEscaped).

@pre Call in any context.
*/
EXPORT_C __NAKED__ TInt NKern::CurrentContext()
asm("mrs r1, cpsr ");
__ASM_CLI(); // interrupts off to stop migration
GET_RWNO_TID(,r3); // r3 = &SubScheduler()
asm("mov r0, #2 "); // 2 = interrupt
asm("and r2, r1, #0x1f "); // r2 = mode
asm("cmp r2, #0x13 ");
asm("ldreqb r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,iInIDFC));
asm("bne 0f "); // if not svc, must be interrupt
asm("movne r0, #1 "); // if iInIDFC, return 1 else return 0
asm("msr cpsr, r1 "); // restore interrupts
832 extern "C" __NAKED__ void send_irq_ipi(TSubScheduler*)
834 __DATA_SYNC_BARRIER_Z__(r3); // need DSB before sending any IPI
835 asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
836 asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
837 asm("mov r1, #%a0" : : "i" ((TInt)TRANSFERRED_IRQ_VECTOR));
838 asm("orr r1, r1, r3, lsl #16 ");
839 asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs
// Send any outstanding reschedule IPIs when the kernel is unlocked on this CPU.
// Call with interrupts disabled, R0->TSubScheduler, R12=R0->iReschedIPIs
// Return with R0 unaltered.
extern "C" __NAKED__ void send_accumulated_resched_ipis()
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr));
asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedIPIs));
__DATA_SYNC_BARRIER__(r1); // need DSB before sending any IPI
asm("mov r1, r12, lsl #16 ");
// asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs

// Send a reschedule IPI to the specified CPU
extern "C" __NAKED__ void send_resched_ipi(TInt /*aCpu*/)
__DATA_SYNC_BARRIER_Z__(r2); // need DSB before sending any IPI
asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
ASM_DEBUG1(SendReschedIPI,r0);
asm("mov r1, #0x10000 ");
asm("mov r1, r1, lsl r0 "); // 0x10000<<aCpu
// asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs

// Send a reschedule IPI to the current processor
// *** DON'T DO ANY TRACING OR INSTRUMENTATION ***
extern "C" __NAKED__ void send_self_resched_ipi()
__DATA_SYNC_BARRIER_Z__(r2); // need DSB before sending any IPI
asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
asm("mov r1, #0x02000000 "); // target = requesting CPU only
// asm("orr r1, r1, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
asm("str r1, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPI

extern "C" __NAKED__ void send_resched_ipis(TUint32 aMask)
ASM_DEBUG1(SendReschedIPIs,r0);
__DATA_SYNC_BARRIER_Z__(r2); // need DSB before sending any IPI
asm("cmp r0, #0 "); // any bits set in aMask?
asm("ldrne r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
asm("movne r0, r0, lsl #16 ");
// asm("orrne r0, r0, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
asm("strne r0, [r2, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs if any

extern "C" __NAKED__ void send_resched_ipi_and_wait(TInt /*aCpu*/)
asm("ldr r1, __TheSubSchedulers ");
asm("mov r2, #0x10000 ");
asm("mov r2, r2, lsl r0 "); // 0x10000<<aCpu
ASM_DEBUG1(SendReschedIPIAndWait,r0);
asm("add r0, r1, r0, lsl #9 "); // sizeof(TSubScheduler)=512
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_GicDistAddr)); // we assume i_GicDistAddr is the same for all CPUs
asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
__DATA_SYNC_BARRIER_Z__(r1); // make sure i_IrqCount is read before IPI is sent
// asm("orr r2, r2, #%a0" : : "i" ((TInt)RESCHED_IPI_VECTOR)); RESCHED_IPI_VECTOR=0
asm("str r2, [r3, #%a0]" : : "i" _FOFF(GicDistributor, iSoftIrq)); // trigger IPIs
__DATA_SYNC_BARRIER__(r1); // make sure IPI has been sent
asm("ldrb r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, iRescheduleNeededFlag));
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqNestCount));
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler, i_IrqCount));
asm("beq 0f "); // iRescheduleNeededFlag not set -> wait
asm("bge 2f "); // if other CPU is in an ISR, finish
asm("cmp r3, r12 "); // if not, has i_IrqCount changed?
ARM_WFEcc(CC_EQ); // if not, wait for something to happen ...
asm("beq 1b "); // ... and loop
__DATA_MEMORY_BARRIER__(r1); // make sure subsequent memory accesses don't jump the gun
// guaranteed to observe final thread state after this

asm("__TheSubSchedulers: ");
asm(".word TheSubSchedulers ");
/* If the current thread is subject to timeslicing, update its remaining time
from the current CPU's local timer. Don't stop the timer.
If the remaining time is negative, save it as zero.
*/
__NAKED__ void TSubScheduler::SaveTimesliceTimer(NThreadBase* /*aThread*/)
asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
asm("ldrb r12, [r1, #%a0]" : : "i" _FOFF(NThreadBase,i_NThread_Initial));
asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
asm("ble 0f "); // thread isn't timesliced or timeslice already expired so skip
asm("bne 0f "); // initial (i.e. idle) thread, so skip
asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
asm("movmi r0, #0 "); // if timer count is negative, save zero
asm("umull r0, r3, r12, r3 "); // scale up to max timer clock
asm("adds r0, r0, #0x00800000 ");
asm("adcs r3, r3, #0 ");
asm("mov r0, r0, lsr #24 ");
asm("orr r0, r0, r3, lsl #8 ");
asm("str r0, [r1, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
/* Update aOld's execution time and set up the timer for aNew
Update this CPU's timestamp value

if (!aOld) aOld=iInitialThread
if (!aNew) aNew=iInitialThread
newcount = aNew->iTime>0 ? Max(aNew->iTime*i_TimerMultF/2^32, 1) : 2^31-1
oldcount = timer count
if (oldcount<=0 || aOld!=aNew)
	{
	timer count = newcount
	elapsed = i_LastTimerSet - oldcount
	i_LastTimerSet = newcount
	elapsed = elapsed * i_TimerMultI / 2^24
	aOld->iTotalCpuTime64 += elapsed
	correction = i_TimestampError;
	if (correction > i_MaxCorrection)
		correction = i_MaxCorrection
	else if (correction < -i_MaxCorrection)
		correction = -i_MaxCorrection
	i_TimestampError -= correction
	i_LastTimestamp += elapsed + i_TimerGap - correction
	}
*/
__NAKED__ void TSubScheduler::UpdateThreadTimes(NThreadBase* /*aOld*/, NThreadBase* /*aNew*/)
asm("ldreq r2, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultF));
asm("ldreq r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iInitialThread));
asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iTime));
asm("stmfd sp!, {r4-r7} ");
asm("ldr r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
asm("ldr r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
asm("adds r6, r6, #1 ");
asm("str r6, [r2, #%a0]" : : "i" _FOFF(NThreadBase,iRunCount64));
asm("ldr r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
asm("ldr r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
asm("adcs r7, r7, #0 ");
asm("str r7, [r2, #%a0]" : : "i" (_FOFF(NThreadBase,iRunCount64)+4));
asm("adds r4, r4, #1 ");
asm("adcs r6, r6, #0 ");
asm("str r4, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iReschedCount64));
asm("str r6, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iReschedCount64)+4));
asm("cmp r3, #1 "); // aNew->iTime > 0 ?
asm("umullge r4, r3, r12, r3 ");
asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LocalTimerAddr));
asm("movlt r3, #0x7fffffff ");
asm("addges r3, r3, r4, lsr #31 "); // round up top 32 bits if bit 31 set
asm("moveq r3, #1 "); // if result zero, limit to 1
asm("ldr r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
asm("ldr r4, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));
asm("bgt 0f "); // same thread, timeslice not expired -> leave timer alone
asm("str r3, [r5, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount)); // set new timeslice value in timer
asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));
asm("str r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));
asm("sub r12, r12, r4 "); // r12 = elapsed (actual timer ticks)
asm("umull r4, r5, r12, r5 ");
asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(NThreadBase,iTotalCpuTime64));
asm("ldr r12, [r1, #4] ");
asm("adds r4, r4, #0x00800000 ");
asm("adcs r5, r5, #0 ");
asm("mov r4, r4, lsr #24 ");
asm("orr r4, r4, r5, lsl #8 "); // r4 = elapsed
asm("adds r3, r3, r4 ");
asm("adcs r12, r12, #0 ");
asm("stmia r1, {r3,r12} "); // aOld->iTotalCpuTime64 += elapsed
asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_MaxCorrection));
asm("ldr r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
asm("ldr r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
asm("mov r12, r3 ");
asm("movgt r3, r5 "); // if (correction>i_MaxCorrection) correction=i_MaxCorrection
asm("rsblt r3, r5, #0 "); // if (correction+i_MaxCorrection<0) correction=-i_MaxCorrection
asm("ldr r5, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerGap));
asm("sub r12, r12, r3 ");
asm("str r12, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimestampError));
asm("add r4, r4, r5 "); // r4 = elapsed + i_TimerGap
asm("adds r1, r1, r4 ");
asm("adcs r2, r2, #0 "); // iLastTimestamp64 + (elapsed + i_TimerGap)
asm("subs r1, r1, r3 ");
1055 asm("sbcs r1, r1, r3, asr #32 "); // iLastTimestamp64 + (elapsed + i_TimerGap - correction)
1056 asm("str r1, [r0, #%a0]" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));
1057 asm("str r2, [r0, #%a0]" : : "i" (_FOFF(TSubScheduler,iLastTimestamp64)+4));
1060 asm("ldmfd sp!, {r4-r7} ");