// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32\nkern\arm\vectors.cia
//
//

#define __INCLUDE_NTHREADBASE_DEFINES__

#include <e32cia.h>
#include <arm.h>

void FastMutexNestAttempt();
void FastMutexSignalError();
extern "C" void ExcFault(TAny*);

#define __USE_CP15_FAULT_INFO__

#define __CHECK_LOCK_STATE__

//#define __FAULT_ON_FIQ__
#ifdef __CHECK_LOCK_STATE__
// Check that the kernel is unlocked, no fast mutexes are held and that the thread is not in a
// critical section. Called when returning to user mode
__NAKED__ void CheckLockState()
	{
	asm("stmfd sp!, {r14}");
	asm("ldr r12, __TheScheduler ");
	asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmp r14, #0 ");				// kernel locked?
	asm("movne r12, #0xdd000000 ");		// if so, crash with a recognisable fault address
	asm("strne r12, [r12, #1] ");
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("ldr r14, [r12, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp r14, #0 ");				// fast mutex held?
	asm("ldreq r14, [r12, #%a0]" : : "i" _FOFF(NThread,iCsCount));
	asm("cmpeq r14, #0 ");				// or in a critical section?
	asm("ldmeqfd sp!, {pc} ");			// all checks passed - return
	asm("badLockState: ");
	asm("mov r12, #0xd7 ");
	asm("msr cpsr, r12 ");				// mode_abt, all interrupts off
	asm("mov r12, #0xdd000000 ");
	asm("str r12, [r12, #3] ");			// crash
	}
#endif

__ASSERT_COMPILE(EUserModeCallbackRun == 0);
__NAKED__ void CallUserModeCallbacks()
	{
	// called with interrupts disabled
	// preserves r0 and r1 in addition to the usual registers
	// leaves current thread in r2
	// the vast majority of times this is called with zero or one callback pending

	asm(".global callUserModeCallbacks ");
	asm("callUserModeCallbacks: ");

	asm("ldr ip, __TheScheduler ");
	asm("ldr r2, [ip, #%a0]" : : "i" _FOFF(TScheduler, iCurrentThread));

	asm("callUserModeCallbacks2: ");

	USER_MEMORY_GUARD_ASSERT_ON(ip);

#ifdef __CHECK_LOCK_STATE__
	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread,iCsCount));
	asm("cmp ip, #0 ");					// must not already be in a critical section
	asm("bne badLockState ");
#endif

	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserModeCallbacks));
	asm("cmp ip, #0 ");					// any callbacks queued?
	__JUMP(eq, lr);						// no - return immediately
90 asm("stmfd sp!, {r0-r2, r4-r11, lr}");
92 // if r3 != 0 it is the user context type to set the thread to
93 asm("strneb r3, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));
95 // Remove first callback and enter critical section - we can just set iCsCount to 1 as we are
96 // guaranteed not be be in a critical section already
97 asm("ldmia ip, {r1, r3} "); // HARDCODED: TUserModeCallback layout
99 asm("str r0, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));
100 asm("str r1, [r2, #%a0]" : : "i" _FOFF(NThread,iUserModeCallbacks));
	// Re-enable interrupts and call callback
	SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
	asm("mov r1, #%a0 " : : "i" ((TInt)KUserModeCallbackUnqueued));
	asm("str r1, [ip, #%a0]" : : "i" _FOFF(TUserModeCallback, iNext));
	asm("mov r0, ip ");					// r0 = callback object
	asm("mov r1, #0 ");					// 0 == EUserModeCallbackRun
	asm("mov lr, pc ");
	__JUMP(, r3);						// call callback function
	SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);
	// Leave critical section, avoid calling NKern::ThreadLeaveCS unless we have to
	asm("ldmfd sp!, {r0-r2, r4-r11, lr}");

	// reset user context type to undefined if r3 != 0
	asm("cmp r3, #0 ");
	asm("mov ip, #%a0" : : "i" (NThread::EContextUndefined));
	asm("strneb ip, [r2, #%a0]" : : "i" _FOFF(NThread, iUserContextType));
	asm("ldr ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsFunction));
	asm("cmp ip, #0 ");					// outstanding CS function?
	asm("streq ip, [r2, #%a0]" : : "i" _FOFF(NThread, iCsCount));	// no - leave CS cheaply by zeroing iCsCount...
	asm("beq callUserModeCallbacks2 ");	// ...and go round again

	// CS function outstanding - must go through NKern::ThreadLeaveCS
	asm("sub sp, sp, #48 ");			// re-cover the 12 words popped above; interrupts have been
										// off since the pop, so the stack below sp is still intact
	SET_MODE(r0, MODE_SVC, INTS_ALL_ON);
	asm("bl " CSM_ZN5NKern13ThreadLeaveCSEv);
	SET_MODE(r0, MODE_SVC, INTS_ALL_OFF);
	asm("ldmfd sp!, {r0-r2, r4-r11, lr}");
	asm("b callUserModeCallbacks2 ");
	}
/***************************************************************************
 * SWI Handler
 ***************************************************************************/

extern "C" __NAKED__ void __ArmVectorSwi()
	{
	// IRQs disabled, FIQs enabled here
	asm("ldr r12, [lr, #-4] ");			// get SWI opcode
	asm("stmfd sp!, {r11, lr} ");		// save return address, r11 for 8 byte align
	USER_MEMORY_GUARD_ON_IF_MODE_USR(r11);
	asm("ldr r11, __TheScheduler ");
	asm("adr lr, fast_swi_exit ");
	asm("movs r12, r12, lsl #9 ");		// 512*SWI number into r12, carry = bit 23 of opcode
	asm("bcc slow_swi ");				// bit 23=0 for slow/unprot
	asm("ldr r1, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("beq wait_for_any_request ");	// special case for Exec::WaitForAnyRequest
#ifdef __CPU_ARM_HAS_CPS
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
	CPSIDIF;							// all interrupts off
	asm("ldr r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
	asm("ldr r2, [r2] ");				// r2->kernel function
	asm("cmp r3, r12, lsr #9 ");		// r3-SWI number
	__JUMP(hi, r2);						// if SWI number valid, call kernel function
#else
	SET_INTS(r2, MODE_SVC, INTS_ALL_OFF);	// all interrupts off
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iFastExecTable));
	asm("ldr r3, [r2], r12, lsr #7 ");	// r3=limit, r2->dispatch table entry
	asm("cmp r3, r12, lsr #9 ");		// r3-SWI number
	asm("ldrhi pc, [r2] ");				// if SWI number valid, call kernel function
#endif
	asm("mvn r12, #0 ");				// put invalid SWI number into r12
	asm("b slow_swi ");					// go through slow SWI routine to call invalid SWI handler
165 asm("fast_swi_exit: ");
166 #ifdef __CHECK_LOCK_STATE__
167 asm("mrs r12, spsr ");
168 asm("tst r12, #0x0f ");
169 asm("bleq " CSM_Z14CheckLockStatev);
171 USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
172 ERRATUM_353494_MODE_CHANGE(,r11);
173 asm("ldmfd sp!, {r11, pc}^ "); // return and restore cpsr
176 asm("slow_swi: "); // IRQs off, FIQs on here
177 asm("stmfd sp!, {r3-r10} "); // save nonvolatile registers, r3 for 8 byte align
178 asm("ldr r9, [r11, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r9->current thread
179 SET_INTS(lr, MODE_SVC, INTS_ALL_ON); // all interrupts on
180 asm("mov r10, r11 "); // r10->scheduler
181 asm("ldr r4, [r9, #%a0]" : : "i" _FOFF(NThread,iSlowExecTable));
182 asm("mrs r11, spsr "); // spsr_svc into r11
183 asm("adr lr, slow_swi_exit ");
184 asm("add r6, r4, r12, lsr #6 "); // r6->dispatch table entry
185 asm("ldr r5, [r4, #-12] "); // r5=limit
186 SET_INTS_1(r7, MODE_SVC, INTS_ALL_OFF);
187 asm("cmp r5, r12, lsr #9 "); // r5-SWI number
188 asm("ldmhiia r6, {r5,r6} "); // if SWI number OK, flags into r5, function addr into r6
189 asm("ldrls pc, [r4, #-8] "); // if SWI number invalid, call invalid handler
190 asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagExtraArgMask)); // extra arguments needed?
191 asm("addne r2, sp, #4 "); // if so, point r2 at saved registers on stack
192 asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagClaim)); // claim system lock?
193 asm("beq slow_swi_no_wait "); // skip if not
195 SET_INTS_2(r7, MODE_SVC, INTS_ALL_OFF); // interrupts off
197 asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
199 asm("bne " CSM_Z20FastMutexNestAttemptv); // debug check that current thread doesn't already hold a fast mutex
201 asm("ldr r12, [r10, #%a0]!" : : "i" _FOFF(TScheduler,iLock.iHoldingThread)); // r12=iLock.iHoldingThread
202 SET_INTS_1(r7, MODE_SVC, INTS_ALL_ON);
203 asm("cmp r12, #0 "); // is system lock already held?
204 asm("bne ss_fast_mutex_held "); // branch if it is
205 asm("ss_fast_mutex_obtained: ");
206 asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex)); // current thread->iHeldFastMutex=&iLock
207 asm("str r9, [r10], #-%a0" : : "i" _FOFF(TScheduler,iLock)); // iLock.iHoldingThread=current thread, r10->scheduler
208 #ifdef BTRACE_FAST_MUTEX
209 asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
211 asm("bne syslock_trace_wait");
212 asm("syslock_trace_wait_done:");
214 SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON); // all interrupts on
216 asm("slow_swi_no_wait: ");
217 asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagPreprocess)); // preprocess (handle lookup)? can use r4, r7, r8, r12, r0
219 asm("ldrne pc, [r4, #-4] "); // call preprocess handler if required
221 __JUMP(,r6); // call exec function, preserve r5,r11 if release syslock not required
222 // preserve r5,r9,r10,r11 if release required
223 asm("tst r5, #%a0" : : "i" ((TInt)KExecFlagRelease)); // release system lock?
224 asm("beq slow_swi_exit "); // skip if not
	SET_INTS(r12, MODE_SVC, INTS_ALL_OFF);	// disable interrupts

	asm("add r8, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
	asm("ldr r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));
	asm("cmp r12, r8 ");
	asm("bne " CSM_Z20FastMutexSignalErrorv);	// debug check that current thread holds system lock

#ifdef BTRACE_FAST_MUTEX
	asm("ldrb r12, [r10,#%a0]" : : "i" (_FOFF(TScheduler,iFastMutexFilter)));
	asm("cmp r12, #0 ");				// trace filter enabled?
	asm("bne syslock_trace_signal");
	asm("syslock_trace_signal_done:");
#endif

	asm("mov r12, #0 ");
	asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iHoldingThread));	// iLock.iHoldingThread=NULL
	asm("str r12, [r9, #%a0]" : : "i" _FOFF(NThread,iHeldFastMutex));	// current thread->iHeldFastMutex=NULL
	asm("ldr r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));	// r3=iLock.iWaiting
	asm("str r12, [r10, #%a0]" : : "i" _FOFF(TScheduler,iLock.iWaiting));	// iLock.iWaiting=0
	SET_INTS_1(r8, MODE_SVC, INTS_ALL_ON);
	asm("cmp r3, #0 ");					// check waiting flag
	asm("bne ss_signal_check ");		// branch if set
	asm("ss_signal_done: ");
	SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON);	// otherwise reenable interrupts
250 asm("slow_swi_exit: ");
251 #ifdef __CHECK_LOCK_STATE__
252 asm("tst r11, #0x0f ");
253 asm("bleq " CSM_Z14CheckLockStatev);
255 SET_INTS(r12, MODE_SVC, INTS_ALL_OFF); // disable interrupts
256 asm("msr spsr, r11 "); // restore spsr_svc
257 asm("tst r11, #0x0f ");
259 #if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
260 asm("nop "); // ARM Cortex-A9 MPCore erratum 571622 workaround
261 // Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
263 asm("bleq callUserModeCallbacks "); // call user-mode callbacks
264 USER_MEMORY_GUARD_OFF_IF_MODE_USR(r11);
265 ERRATUM_353494_MODE_CHANGE(,r11);
266 asm("ldmfd sp!, {r3-r11,pc}^ "); // return from EXEC function
	// Come here if we need to wait for the system lock
	// r9->current thread, r10=&iLock, r12=iLock.iHoldingThread
	asm("ss_fast_mutex_held: ");
	asm("mov r8, #1 ");
	asm("str r8, [r10, #%a0]" : : "i" (_FOFF(TScheduler,iKernCSLocked)-_FOFF(TScheduler,iLock)));	// lock the kernel
	SET_INTS_2(r7, MODE_SVC, INTS_ALL_ON);	// enable interrupts
	asm("str r8, [r10, #4] ");			// iWaiting=1
	asm("str r10, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// current thread->iWaitFastMutex=&iLock
	asm("stmfd sp!, {r0-r3} ");			// save exec call arguments
	asm("mov r0, r12 ");				// parameter for YieldTo
	ASM_DEBUG1(NKFMWaitYield,r0);
	asm("bl " CSM_ZN10TScheduler7YieldToEP11NThreadBase);	// yield to the mutex holding thread
	// will not return until the mutex is free
	// on return r0=Scheduler,r1=0,r2!=0,r3=current thread, kernel unlocked, interrupts disabled
	asm("str r1, [r9, #%a0]" : : "i" _FOFF(NThread,iWaitFastMutex));	// iWaitFastMutex=NULL
	asm("ldmfd sp!, {r0-r3} ");			// retrieve exec call arguments
	asm("b ss_fast_mutex_obtained ");	// branch back to main code path
	// Come here if we need to reschedule after releasing the system lock
	// kernel unlocked, interrupts enabled, r0 contains return value from Exec call
	// r9->current thread, r10=&TheScheduler, r3=1, r8=0x13
	asm("ss_signal_check: ");
	asm("str r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel (assumes iWaiting always 0 or 1)
	SET_INTS_2(r8, MODE_SVC, INTS_ALL_ON);	// reenable interrupts
	asm("strb r3, [r10, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
	asm("ldr r3, [r9, #%a0]" : : "i" _FOFF(NThread,iCsFunction));	// r3=current thread->iCsFunction
	asm("ldr r2, [r9, #%a0]" : : "i" _FOFF(NThread,iCsCount));	// r2=current thread->iCsCount
	asm("mov r4, r0 ");					// save return value
	asm("cmp r3, #0 ");					// outstanding CS function?
	asm("beq 2f ");						// branch if not
	asm("cmp r2, #0 ");					// iCsCount!=0 ?
	asm("moveq r0, r9 ");				// if iCsCount=0, DoCsFunction()
	asm("bleq " CSM_ZN11NThreadBase12DoCsFunctionEv);
	asm("2: ");
	asm("bl " CSM_ZN10TScheduler10RescheduleEv);	// reschedule to allow waiting thread in
	asm("mov r0, r4 ");					// recover return value
	asm("b ss_signal_done ");			// branch back to main code path
#ifdef BTRACE_FAST_MUTEX
	asm("syslock_trace_wait:");
	asm("ldr r12, [sp,#9*4]");			// r12 = return address from SWI
	asm("mov r8, r3");					// save r3
	asm("stmdb sp!,{r0-r2,r12}");		// 4th item on stack is PC value for trace
	asm("ldr r0, fmwait_trace_header");
	asm("mov r2, r9");					// current thread
	asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
	asm("mov lr, pc ");
	asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldmia sp!,{r0-r2,r12}");
	asm("mov r3, r8");					// restore r3
	asm("b syslock_trace_wait_done");

	asm("syslock_trace_signal:");
	asm("ldr r12, [sp,#9*4]");			// r12 = return address from SWI
	asm("stmdb sp!,{r0-r2,r12}");		// 4th item on stack is PC value for trace
	asm("ldr r0, fmsignal_trace_header");
	asm("mov r2, r9");					// current thread
	asm("add r3, r10, #%a0" : : "i" _FOFF(TScheduler,iLock));
	asm("mov lr, pc ");
	asm("ldr pc, [r10, #%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
	asm("ldmia sp!,{r0-r2,r12}");
	asm("b syslock_trace_signal_done");

	asm("fmsignal_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexSignal << BTrace::ESubCategoryIndex*8)) );

	asm("fmwait_trace_header:");
	asm(".word %a0" : : "i" ((TInt)(16<<BTrace::ESizeIndex) + ((BTrace::EContextIdPresent|BTrace::EPcPresent) << BTrace::EFlagsIndex*8) + (BTrace::EFastMutex << BTrace::ECategoryIndex*8) + (BTrace::EFastMutexWait << BTrace::ESubCategoryIndex*8)) );
#endif
	}
/***************************************************************************
 * IRQ Postamble
 * This routine is called after the IRQ has been dispatched
 * spsr_irq, r4-r11 are unmodified
 * spsr_irq,r0-r3,r12,return address are on the top of the IRQ stack
 ***************************************************************************/
extern "C" __NAKED__ void __ArmVectorIrq()
	{
	// FIQs enabled here but not IRQs
	asm("ldr r1, __TheScheduler ");
	asm("mrs r0, spsr ");				// check interrupted mode
	asm("add r12, sp, #%a0 " : : "i" (4*(6+USER_MEMORY_GUARD_SAVE_WORDS)));	// r12=sp_irq+6 or 8 words
	asm("and r2, r0, #0x1f ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// r3=KernCSLocked
	asm("cmp r2, #0x10 ");				// check for mode_usr
	asm("cmpne r2, #0x13 ");			// or mode_svc
	asm("cmpeq r3, #0 ");				// and then check if kernel locked
	asm("bne IrqExit0 ");				// if wrong mode or locked, return immediately
	SET_INTS(r2, MODE_IRQ, INTS_ALL_OFF);	// disable FIQs before we check for reschedule
	asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));	// r2=DfcPendingFlag/RescheduleNeededFlag
	asm("add r3, r3, #1 ");
	SET_MODE_1(lr, MODE_SVC, INTS_ALL_ON);
	asm("cmp r2, #0 ");					// check if reschedule needed
	asm("beq IrqExit0 ");				// if not, return immediately
	asm("str r3, [r1, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	SET_MODE_2(lr, MODE_SVC, INTS_ALL_ON);	// mode_svc, interrupts back on

	asm("ldmdb r12!, {r1-r3} ");		// move saved registers (r0-r3,r12,pc) over to mode_svc stack
	asm("stmfd sp!, {r1-r3} ");
	asm("ldmdb r12!, {r1-r3} ");
	asm("stmfd sp!, {r1-r3} ");
	asm("stmfd sp!, {r0,lr} ");			// store lr_svc and interrupted cpsr on current mode_svc stack
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldmdb r12, {r1-r2} ");
	asm("stmfd sp!, {r1-r2} ");			// move user guard over to mode_svc stack
#endif
	SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
	SET_MODE(lr, MODE_IRQ, INTS_IRQ_OFF);	// mode_irq, IRQs off
	asm("add sp, r12, #24 ");			// restore mode_irq stack balance
	SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON);	// back to mode_svc, IRQs on
	// reschedule - this also switches context if necessary
	// enter this function in mode_svc, interrupts on, kernel locked
	// exit this function in mode_svc, all interrupts off, kernel unlocked
	asm("irq_do_resched: ");
	asm("bl " CSM_ZN10TScheduler10RescheduleEv);
	asm(".global irq_resched_return ");
	asm("irq_resched_return: ");

	SET_MODE(r2, MODE_SVC, INTS_ALL_OFF);	// all interrupts off
	asm("ldr r1, [sp, #%a0] " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));	// get interrupted cpsr, don't unbalance stack

#ifdef __CHECK_LOCK_STATE__
	asm("tst r1, #0x0f ");				// returning to user mode?
	asm("bleq " CSM_Z14CheckLockStatev);
#endif

	asm("tst r1, #0x0f ");				// returning to user mode?
	asm("mov r3, #%a0 " : : "i" (NThread::EContextUserIntrCallback));
	asm("bleq callUserModeCallbacks ");	// call user-mode callbacks

#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r1, [sp], #%a0 " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));	// pop saved DACR, adjust sp
	USER_MEMORY_GUARD_RESTORE(r1,lr);
#endif

	asm("ldmfd sp!, {r1, lr} ");		// restore interrupted cpsr (r1) and lr_svc
	asm("add sp, sp, #24 ");			// restore mode_svc stack balance
	asm("mov r12, sp ");				// r12=address of remaining saved registers

	SET_MODE(r2, MODE_IRQ, INTS_ALL_OFF);	// back into mode_irq, all interrupts off

	asm("msr spsr, r1 ");				// interrupted cpsr into spsr_irq
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmdb r12, {r0-r3,r12,pc}^ ");	// return from interrupt
	asm("IrqExit0: ");
#ifdef __CHECK_LOCK_STATE__
	asm("tst r0, #0x0f ");				// interrupted mode_usr?
	asm("bleq " CSM_Z14CheckLockStatev);
#endif
	asm("IrqExit1: ");					// entry point for __ArmVectorIrqPostambleNoResched()
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr lr, [sp], #%a0 " : : "i" (4*USER_MEMORY_GUARD_SAVE_WORDS));	// pop saved DACR, adjust sp
	USER_MEMORY_GUARD_RESTORE(lr,r12);
#endif
#ifdef BTRACE_CPU_USAGE
	asm("ldrb r2, [r1,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
	asm("mov r0, #%a0" : : "i" ((TInt)4 ) );
	asm("add r0, r0, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EIrqEnd<<BTrace::ESubCategoryIndex*8)) );
	asm("cmp r2, #0 ");					// trace enabled?
	asm("movne lr, pc ");
	asm("ldrne pc, [r1,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
#endif
	ERRATUM_353494_MODE_CHANGE(,r12);
	asm("ldmfd sp!, {r0-r3,r12,pc}^ ");	// return from interrupt
	}
/***************************************************************************
 * IRQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the IRQ has been dispatched
 * spsr_irq, r4-r11 are unmodified
 * spsr_irq,r0-r3,r12,return address are on the top of the IRQ stack
 ***************************************************************************/

extern "C" EXPORT_C __NAKED__ void __ArmVectorIrqPostambleNoResched()
	{
	// FIQs enabled here but not IRQs
	asm("ldr r1, __TheScheduler ");
	asm("b IrqExit1 ");
	}
/***************************************************************************
 * FIQ Postamble
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ***************************************************************************/
extern "C" __NAKED__ void __ArmVectorFiq()
	{
#ifdef __FAULT_ON_FIQ__
	asm(".word 0xe7f10f10 ");			// undefined instruction - fault if a FIQ is taken
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r12, [sp], #4 ");			// pop saved DACR
#endif
	asm("mrs r8, spsr ");				// check interrupted mode
	asm("and r10, r8, #0x1f ");
	asm("cmp r10, #0x10 ");				// check for mode_usr
	asm("ldr r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmpne r10, #0x13 ");			// or mode_svc
	asm("ldreq r10, [r9, #%a0]" : : "i" _FOFF(TScheduler,iRescheduleNeededFlag));
	asm("cmpeq r11, #0 ");				// and check if kernel locked
	asm("bne FiqExit0 ");				// if wrong mode or kernel locked, return immediately
	asm("cmp r10, #0 ");				// check if reschedule needed
	asm("beq FiqExit0 ");				// if not, return from interrupt
	// we interrupted mode_usr or mode_svc, kernel unlocked, reschedule needed
	asm("add r11, r11, #1 ");
	asm("str r11, [r9, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));	// lock the kernel
	asm("stmfd sp!, {r1-r3} ");			// save interrupted r1-r3 on FIQ stack
	asm("mov r1, r8 ");					// r1=interrupted cpsr
	asm("mov r3, sp ");					// r3 points to saved registers
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("mov r2, r12 ");				// saved DACR into R2
#endif
	SET_MODE(lr, MODE_SVC, INTS_ALL_ON);	// switch to mode_svc, IRQs and FIQs back on
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("str r2, [sp, #%a0]! " : : "i" (-4*(8+USER_MEMORY_GUARD_SAVE_WORDS)));	// save DACR and leave room for spare, cpsr, lr_svc, r0-r3, r12, pc
#else
	asm("sub sp, sp, #32 ");			// make room for saved registers on mode_svc stack
#endif
	asm("ldr r2, [r3, #12] ");			// r2=return address
	asm("str r12, [sp, #%a0] " : : "i" (4*(6+USER_MEMORY_GUARD_SAVE_WORDS)));	// save r12 on mode_svc stack
	asm("str r2, [sp, #%a0] " : : "i" (4*(7+USER_MEMORY_GUARD_SAVE_WORDS)));	// save return address on mode_svc stack
	asm("add r12, sp, #%a0 " : : "i" (4*(USER_MEMORY_GUARD_SAVE_WORDS)));

	asm("stmia r12!, {r1,lr} ");		// save interrupted cpsr and lr_svc
	asm("ldmia r3, {r1,r2,lr} ");		// retrieve original r1-r3 from mode_fiq stack
	asm("stmia r12, {r0-r2,lr} ");		// save original r0-r3 - saved register order is now cpsr,lr_svc,r0-r3,r12,pc
	SET_MODE_1(r2, MODE_SVC, INTS_ALL_ON);
	SET_MODE(lr, MODE_FIQ, INTS_ALL_OFF);	// mode_fiq, IRQs and FIQs off
	asm("add sp, r3, #16 ");			// restore mode_fiq stack balance
	SET_MODE_2(r2, MODE_SVC, INTS_ALL_ON);	// back to mode_svc, IRQs on
	asm("adr lr, irq_resched_return ");
	asm("b " CSM_ZN10TScheduler10RescheduleEv);	// do reschedule and return to irq_resched_return
520 asm("FiqExit0:"); // also entry point for __ArmVectorFiqPostambleNoResched()
521 USER_MEMORY_GUARD_RESTORE(r12,lr);
523 #ifndef BTRACE_CPU_USAGE
524 ERRATUM_353494_MODE_CHANGE(,r11);
525 asm("ldmfd sp!, {pc}^ "); // return from interrupt
527 asm("ldrb r8, [r9,#%a0]" : : "i" _FOFF(TScheduler,iCpuUsageFilter));
528 asm("mov r10, #%a0" : : "i" ((TInt)(BTrace::ECpuUsage<<BTrace::ECategoryIndex*8)+(BTrace::EFiqEnd<<BTrace::ESubCategoryIndex*8)) );
529 asm("adr lr, FiqTraceExit0");
531 ERRATUM_353494_MODE_CHANGE(eq,r8);
532 asm("ldmeqfd sp!, {pc}^ "); // return from interrupt if trace not enabled
533 asm("stmfd sp!, {r0-r3} ");
534 asm("add r0, r10, #%a0" : : "i" ((TInt)4 ) );
535 asm("ldr pc, [r9,#%a0]" : : "i" _FOFF(TScheduler,iBTraceHandler));
536 asm("FiqTraceExit0:");
537 ERRATUM_353494_MODE_CHANGE(,r3);
538 asm("ldmfd sp!, {r0-r3,pc}^ "); // return from interrupt
541 asm("__TheScheduler: ");
542 asm(".word TheScheduler ");
/***************************************************************************
 * FIQ Postamble which will not reschedule (can be returned to by co-resident OS).
 * This routine is called after the FIQ has been dispatched
 * spsr_fiq, r0-r3 are unmodified
 * Return address is on the top of the FIQ stack
 ***************************************************************************/

extern "C" EXPORT_C __NAKED__ void __ArmVectorFiqPostambleNoResched()
	{
#ifdef __FAULT_ON_FIQ__
	asm(".word 0xe7f10f10 ");			// undefined instruction - fault if a FIQ is taken
#endif
	// IRQs and FIQs disabled here
	// r0-r7 are unaltered from when FIQ occurred
	asm("ldr r9, __TheScheduler ");
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	asm("ldr r12, [sp], #4 ");			// pop saved DACR
#endif
	asm("b FiqExit0 ");
	}
extern "C" __NAKED__ void __ArmVectorAbortData()
	{
#if defined(__CPU_CORTEX_A8__) && (!defined(__CPU_ARM_A8_ERRATUM_447862_FIXED) || !defined(__CPU_ARM_A8_ERRATUM_451027_FIXED))
	ARM_DMBSH;							// ARM Cortex-A8 erratum 447862/451027 workaround
#endif
	asm("sub lr, lr, #8");				// lr now points to aborted instruction
	asm("stmfd sp!, {r0-r4,r12,lr}");	// save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX								// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);						// dummy STREX to reset exclusivity monitor
#endif
	asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionDataAbort));
	// generic exception handler
	// come here with r1=exception code, lr points to aborted instruction, r0-r4,r12,lr saved
	asm("handle_exception: ");
	asm("mrs r0, spsr ");				// r0=value of cpsr when abort occurred

	asm("handle_exception2: ");
	asm("mrs r12, cpsr ");
	asm("and r3, r0, #0x1f ");			// r3=processor mode when abort occurred
	asm("bic r12, r12, #0xc0 ");
	asm("cmp r3, #0x10 ");				// aborted in user mode?
	asm("cmpne r3, #0x13 ");			// if not, aborted in mode_svc?
	asm("bne fatal_exception_mode ");	// if neither, fault
	asm("msr cpsr, r12 ");				// reenable interrupts - rescheduling disabled by mode_abt/mode_und
	asm("ldr r2, __TheScheduler ");
	asm("mov r3, sp ");					// r3 points to saved registers
	asm("ldr r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("cmp r4, #0 ");					// exception with kernel locked?
	asm("bne fatal_exception_mode ");	// if so, fault
	asm("add r4, r4, #1 ");				// lock the kernel
	asm("str r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
	asm("mov r4, #0x13 ");
	asm("msr cpsr, r4 ");				// mode_svc, interrupts on, kernel locked

	asm("ldr r4, [r2, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("tst r0, #0x0f ");				// check if exception in mode_usr
	asm("mov r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
	asm("streqb r2, [r4, #%a0]" : : "i" _FOFF(NThread,iSpare3));	// if so, set iUserContextType = EContextException
	asm("add r4, r4, #%a0" : : "i" _FOFF(NThread,iStackBase));
	asm("ldmia r4, {r2,r4} ");			// r2=supervisor stack area base, r4=size
	asm("subs r2, sp, r2 ");			// r2=amount of mode_svc stack remaining
	asm("blo fatal_exception_stack ");	// if stack pointer invalid, fault
	asm("cmp r2, r4 ");					// sp beyond top of stack area?
	asm("bhi fatal_exception_stack ");
	asm("cmp r2, #128 ");				// check enough stack to handle exception
	asm("blo fatal_exception_stack ");	// if not, fault
	// At this point we are in mode_svc with interrupts enabled and the kernel locked.
	// We know the supervisor stack is valid and has enough free space to store the exception info.
	// Registers: R0=aborted cpsr, R1=exception type, R2,R4 scratch, R3 points to saved registers
	// on mode_abt or mode_und stack, R12 holds mode of exception (mode_abt or mode_und).

	asm("ldr r4, [r3, #16] ");			// restore original r4
	asm("mov r2, sp ");					// r2=sp_svc when abort occurred
	asm("sub sp, sp, #92 ");			// push 23 words onto mode_svc stack
	asm("stmia sp, {r0-r2,r4-r11,lr} ");	// save cpsr, exc id, sp_svc, r4-r11, lr_svc
	asm("ldmia r3!, {r4-r10} ");		// get registers from mode_abt or mode_und stack
	asm("stmdb r2!, {r4-r7,r9,r10} ");	// transfer saved registers from exception stack except r4
	asm("stmdb r2, {r13,r14}^ ");		// save sp_usr and lr_usr
	asm("sub r2, r2, #20 ");
	// Set r0 = fault address and r1 = fault status.
	// For prefetch aborts use IFAR if it exists otherwise use the return address.
#ifdef __USE_CP15_FAULT_INFO__
	asm("cmp r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
#ifdef __CPU_ARM_HAS_SPLIT_FSR
	asm("mrcne p15, 0, r1, c5, c0, 0");	// r1 = data fault status (DFSR)
	asm("mrcne p15, 0, r0, c6, c0, 0");	// r0 = data fault address (DFAR)
	asm("mrceq p15, 0, r1, c5, c0, 1");	// r1 = instruction fault status (IFSR)
#ifdef __CPU_ARM_HAS_CP15_IFAR
	asm("mrceq p15, 0, r0, c6, c0, 2");	// r0 = instruction fault address (IFAR)
#else
	asm("moveq r0, r10");				// r0 = return address.
#endif // __CPU_ARM_HAS_CP15_IFAR
#else
	asm("mrcne p15, 0, r0, c6, c0");	// r0 = fault address (FAR)
	asm("moveq r0, r10");				// r0 = return address.
	asm("mrc p15, 0, r1, c5, c0");		// r1 = fault status (FSR)
#endif // __CPU_ARM_HAS_SPLIT_FSR
#endif // __USE_CP15_FAULT_INFO__
653 asm("mrs r3, spsr "); // r3=spsr_svc
654 asm("stmia r2, {r0,r1,r3} "); // save these
655 asm("msr cpsr, r12 "); // back into exception mode
656 asm("add sp, sp, #28 "); // restore exception stack balance
657 asm("mov r5, #0x13 ");
658 asm("msr cpsr, r5 "); // back into mode_svc
660 // Now we can unlock the kernel and process the exception
661 asm("bl " CSM_ZN10TScheduler10RescheduleEv);
662 asm("msr cpsr, r5 "); // enable interrupts
664 // call the exception dispatcher, r3 is the current thread
665 asm("ldr r12, [r3, #%a0]" : : "i" _FOFF(NThread,iHandlers));
667 asm("mov r0, sp "); // r0 points to saved exception information
668 asm("sub sp, sp, #4 "); // make room for r0
669 asm("bic sp, sp, #4 "); // align stack to 8 byte boundary
670 asm("str r0, [sp] "); // save original stack pointer
672 USER_MEMORY_GUARD_ON(,r11,lr);
673 asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(SNThreadHandlers,iExceptionHandler));
675 __JUMP(,r12); // call exception handler
676 USER_MEMORY_GUARD_RESTORE(r11,lr);
677 asm("ldr sp, [sp, #0] "); // restore stack pointer
	// return from exception
	asm("ldr r0, __TheScheduler ");
	asm("mov r3, sp ");					// r3 points to the saved exception information
	asm("ldr r2, [r0, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread));
	asm("ldr r0, [r3], #12 ");			// r0=cpsr, skip exc id and sp_svc
	asm("ldmfd r3!, {r4-r11,lr} ");		// restore r4-r11 and lr_svc
	asm("ldr r12, [r3, #8]! ");			// skip fault address and fault status, r12=spsr_svc
	asm("ldmib r3, {r13,r14}^ ");		// restore sp_usr and lr_usr
	asm("add r1, r3, #12 ");			// r1 points to saved r0-r3,r12,pc
	asm("mov r3, #0xd3 ");
	asm("msr cpsr, r3 ");				// mode_svc, all interrupts off
	asm("msr spsr, r12 ");				// restore spsr_svc
	asm("tst r0, #0x0f ");				// check if exception in mode_usr
#if defined(__CPU_CORTEX_A9__) && !defined(__CPU_ARM_A9_ERRATUM_571622_FIXED)
	asm("nop ");						// ARM Cortex-A9 MPCore erratum 571622 workaround
	asm("nop ");						// Insert nops so branch doesn't occur in 2nd or 3rd position after a msr spsr
#endif
#ifdef __CHECK_LOCK_STATE__
	asm("bleq " CSM_Z14CheckLockStatev);
	asm("tst r0, #0x0f ");				// recheck if exception in mode_usr
#endif
	asm("bne 1f ");						// not returning to user mode - skip callbacks
#ifdef __USER_MEMORY_GUARDS_ENABLED__
	USER_MEMORY_GUARD_ON(,lr,r12);
	asm("tst lr, #0xc0000000 ");		// user memory enabled?
	asm("adrne lr, 2f ");				// yes - enable it after callbacks
	asm("adreq lr, 1f ");				// no - leave it disabled after callbacks
#else
	asm("adr lr, 1f ");
#endif
	asm("b callUserModeCallbacks2 ");	// call user-mode callbacks
	asm("2: ");
	USER_MEMORY_GUARD_OFF(,lr,lr);
	asm("1: ");
714 asm("tst r0, #0x0f "); // check if exception in mode_usr
715 asm("mov r3, #%a0 " : : "i" ((TInt)NThread::EContextUndefined));
716 asm("streqb r3, [r2, #%a0]" : : "i" _FOFF(NThread,iSpare3)); // if so, set iUserContextType = EContextUndefined
717 asm("add sp, r1, #24 "); // restore mode_svc stack balance
718 asm("mov r2, #0xd7 ");
719 asm("msr cpsr, r2 "); // mode_abt, all interrupts off
720 asm("msr spsr, r0 "); // spsr_abt=aborted cpsr
721 ERRATUM_353494_MODE_CHANGE(,r12);
722 asm("ldmia r1, {r0-r3,r12,pc}^ "); // restore r0-r3,r12 and return from exception
	// get here if exception occurred in mode other than usr or svc
	// we are in mode_abt or mode_und with IRQs disabled
	asm("fatal_exception_mode: ");
	asm("ldr r2, __TheScheduler ");
	asm("ldr lr, [r2, #%a0]" : : "i" _FOFF(TScheduler,iMonitorExceptionHandler));
	asm("cmp lr, #0 ");					// crash debugger installed?
	__JUMP(ne,lr);						// if crash debugger running, let it handle exception
	// get here if mode_svc stack has overflowed
	// we are in mode_svc with interrupts enabled and the kernel locked
	// R0=original CPSR R1=exc code R12=mode of exception
	asm("fatal_exception_stack: ");
	asm("orr r3, r12, #0xC0 ");
	asm("msr cpsr, r3 ");				// back to exception mode, all interrupts off
	asm("mov r2, r0 ");					// save original cpsr
	asm("ldr r0, __TheScheduler ");
	asm("ldr r0, [r0, #%a0]" : : "i" _FOFF(TScheduler,i_Regs));	// pass in address of stored registers
	asm("bl " CSM_ZN3Arm9SaveStateER14SFullArmRegSet );
	asm("str r1, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iExcCode));
	asm("str r2, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iFlags));
	asm("ldmia sp!, {r3-r7} ");			// get original R0-R4
	asm("stmia r0, {r3-r7} ");			// save original R0-R4
	asm("ldmia sp!, {r6,r7} ");			// get original R12 and aborted instruction address
	asm("str r6, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR12));
	asm("str r7, [r0, #%a0]" : : "i" _FOFF(SFullArmRegSet,iN.iR15));
	asm("mov r1, #13 ");				// r1 = regnum
	asm("mrs r2, cpsr ");				// r2 = mode
	asm("and r2, r2, #0x1f ");
	asm("bl " CSM_ZN3Arm3RegER14SFullArmRegSetim );	// r0 = pointer to exception mode R13
	asm("str sp, [r0] ");				// save correct original value for exception mode R13

	// call the exception fault dispatcher
	asm("mov r0, #0 ");					// no exception info available here (assumed argument)
	asm("bl ExcFault ");				// does not return
	}
extern "C" __NAKED__ void __ArmVectorAbortPrefetch()
	{
	asm("sub lr, lr, #4");				// lr now points to instruction whose prefetch was aborted
	asm("stmfd sp!, {r0-r4,r12,lr}");	// save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX								// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);						// dummy STREX to reset exclusivity monitor
#endif
	asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionPrefetchAbort));
	asm("b handle_exception ");
	}
extern "C" __NAKED__ void __ArmVectorUndef()
	{
	// Undefined instruction exception
	asm("sub lr, lr, #4");				// lr now points to undefined instruction
	asm("stmfd sp!, {r0-r4,r12,lr}");	// save it along with r0-r4,r12
#if defined(__CPU_ARM_HAS_WORKING_CLREX)
	CLREX								// reset exclusive monitor
#elif defined(__CPU_ARM_HAS_LDREX_STREX)
	STREX(12,0,13);						// dummy STREX to reset exclusivity monitor
#endif
	asm("mrs r0, spsr ");				// r0=CPSR at time of exception
	asm("mov r1, #%a0 " : : "i" ((TInt)EArmExceptionUndefinedOpcode));
	asm("tst r0, #0x20 ");				// exception in THUMB mode?
	asm("addne lr, lr, #2 ");			// if so, correct saved return address
	asm("strne lr, [sp, #24] ");		// update lr in the saved {r0-r4,r12,lr} frame
	asm("b handle_exception2 ");
	}