// First public contribution.
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\nkern\arm\ncutilf.cia
20 #include "highrestimer.h"
22 #ifdef __SCHEDULER_MACHINE_CODED__
23 /** Signals the request semaphore of a nanothread.
25 This function is intended to be used by the EPOC layer and personality
26 layers. Device drivers should use Kern::RequestComplete instead.
28 @param aThread Nanothread to signal. Must be non NULL.
30 @see Kern::RequestComplete()
32 @pre Interrupts must be enabled.
33 @pre Do not call from an ISR.
// Signals aThread's request semaphore (r0 = aThread on entry, ARM EABI).
// Sequence: lock the kernel (increment TScheduler::iKernCSLocked), call
// NFastSemaphore::Signal() on &aThread->iRequestSemaphore, then tail-call
// NKern::Unlock() to release the kernel lock and allow any pending reschedule.
// NOTE(review): this capture is missing some original lines (opening/closing
// braces etc.); the visible instruction sequence is left byte-identical.
35 EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/)
37 ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR);
39 asm("ldr r2, __TheScheduler "); // r2 = &TheScheduler (literal pool below)
40 asm("str lr, [sp, #-4]! "); // push return address across the bl
41 asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
42 asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore)); // r0 = &aThread->iRequestSemaphore
43 asm("add r3, r3, #1 "); // kernel lock count + 1
44 asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // kernel now locked
45 asm("bl " CSM_ZN14NFastSemaphore6SignalEv); // alignment OK since target is also assembler
46 asm("ldr lr, [sp], #4 "); // restore caller's return address
47 asm("b " CSM_ZN5NKern6UnlockEv); // tail-call: NKern::Unlock() returns to caller
51 /** Atomically signals the request semaphore of a nanothread and a fast mutex.
53 This function is intended to be used by the EPOC layer and personality
54 layers. Device drivers should use Kern::RequestComplete instead.
56 @param aThread Nanothread to signal. Must be non NULL.
57 @param aMutex Fast mutex to signal. If NULL, the system lock is signaled.
59 @see Kern::RequestComplete()
61 @pre Kernel must be unlocked.
62 @pre Call in a thread context.
63 @pre Interrupts must be enabled.
// Atomically signals aThread's request semaphore (r0) and fast mutex aMutex
// (r1); if aMutex is NULL the system lock is used instead. Both signals happen
// with the kernel locked, then NKern::Unlock() is tail-called.
// NOTE(review): the capture is missing the line that tests r1 against 0 before
// the "ldreq" below (and the function braces); instructions left byte-identical.
65 EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, NFastMutex* /*aMutex*/)
67 ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC);
69 asm("ldr r2, __TheScheduler "); // r2 = &TheScheduler
71 asm("ldreq r1, __SystemLock "); // if aMutex == NULL, substitute the system lock
72 asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked));
73 asm("stmfd sp!, {r1,lr} "); // save mutex pointer + return address
74 asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore)); // r0 = &aThread->iRequestSemaphore
75 asm("add r3, r3, #1 "); // kernel lock count + 1
76 asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); // kernel now locked
77 asm("bl " CSM_ZN14NFastSemaphore6SignalEv); // signal the request semaphore
78 asm("ldr r0, [sp], #4 "); // r0 = saved mutex pointer
79 asm("bl " CSM_ZN10NFastMutex6SignalEv); // alignment OK since target is also assembler
80 asm("ldr lr, [sp], #4 "); // restore caller's return address
81 asm("b " CSM_ZN5NKern6UnlockEv); // tail-call NKern::Unlock()
// Literal pool shared by the routines above.
83 asm("__SystemLock: ");
84 asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock)); // address of the system lock fast mutex
85 asm("__TheScheduler: ");
86 asm(".word TheScheduler "); // address of the scheduler singleton
91 #ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__
92 // called by C++ version of NThread::UserContextType()
// Helper for the C++ NThread::UserContextType(): reports whether aAddr is the
// kernel's irq_resched_return address (i.e. the thread was rescheduled from an
// interrupt). NOTE(review): the comparison/return instructions between the
// load and the literal pool are missing from this capture.
93 __NAKED__ TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/)
95 asm("ldr r1, __irq_resched_return "); // r1 = address of irq_resched_return
99 asm("__irq_resched_return: ");
100 asm(".word irq_resched_return "); // literal: resume point used after IRQ-driven reschedule
105 /** Get a value which indicates where a thread's user mode context is stored.
107 @return A value that can be used as an index into the tables returned by
108 NThread::UserContextTables().
114 @see UserContextTables
// Classifies where this thread's user-mode register context is stored, by
// examining the thread state and the depth/contents of its supervisor stack.
// Returns a TUserContextType usable as an index into the tables from
// NThread::UserContextTables().
// Decision sequence visible below: dying thread -> saved context type;
// EContextNone/EContextException or the callback types -> return iSpare3
// directly; current thread -> EContextExec; otherwise decode the saved-SP
// stack depth to distinguish WFAR / slow-exec / the interrupt cases.
// NOTE(review): several original lines (branches, the "preempted:" label,
// function braces, returns) are missing from this capture; the visible
// instructions are left byte-identical.
117 EXPORT_C __NAKED__ NThread::TUserContextType NThread::UserContextType()
119 ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR);
121 // Optimisation note: It may be possible to coalesce the first and second
122 // checks below by creating separate "EContextXxxDied" context types for each
123 // possible way a thread can die and ordering these new types before
124 // EContextException.
127 // Dying thread? use context saved earlier by kernel
129 asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NThread,iCsFunction));
130 asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iSpare3)); // r2 = iUserContextType
131 asm("mov r1, r0 "); // r1 = this
132 asm("cmp r3, #%a0" : : "i" ((TInt)NThread::ECSExitInProgress));
136 // Exception or no user context?
138 asm("ldr r3, __TheScheduler");
139 asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextException));
140 asm("ldr r3, [r3, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); // r3 = currently running thread
141 asm("movls r0, r2 "); // Return EContextNone or EContextException
143 asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextUserIntrCallback));
145 asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextWFARCallback));
146 asm("movls r0, r2 "); // Return EContextUserIntrCallback or EContextWFARCallback
149 // Getting current thread context? must be in exec call as exception
150 // and dying thread cases were tested above.
154 asm("moveq r0, #%a0" : : "i" ((TInt)NThread::EContextExec));
// Not the current thread: infer the context location from the supervisor
// stack depth (stack top minus saved SP).
157 asm("ldr r0, [r1, #%a0]" : : "i" _FOFF(NThread,iStackBase));
158 asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iStackSize));
159 asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP));
160 asm("add r2, r2, r0"); // r2 = stack top (base + size)
161 asm("ldr r0, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+11*4)); // get saved return address from reschedule
162 asm("ldr r12, __irq_resched_return ");
163 asm("sub r2, r2, r3"); // r2 = bytes pushed on supervisor stack
165 asm("beq preempted "); // resumed via IRQ path? (label not visible in this capture)
167 // Transition to supervisor mode must have been due to a SWI
169 asm("not_preempted:");
170 asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+15*4)));
171 asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextWFAR)); // thread must have blocked doing Exec::WaitForAnyRequest
172 asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextExec)); // Thread must have been in a SLOW or UNPROTECTED Exec call
175 // thread was preempted due to an interrupt
176 // interrupt and reschedule will have pushed ? words + USER_MEMORY_GUARD_SAVE_WORDS + EXTRA_STACK_SPACE onto the stack
179 asm("ldr r12, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+12*4)); // first word on stack before reschedule
180 asm("mov r0, #%a0 " : : "i" ((TInt)NThread::EContextUserInterrupt));
181 asm("and r12, r12, #0x1f "); // extract CPSR mode bits of interrupted context
182 asm("cmp r12, #0x10 "); // interrupted mode = user?
185 asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+30*4)));
186 asm("bcs not_preempted "); // thread was interrupted in supervisor mode, return address and r4-r11 were saved
188 // interrupt occurred in exec call entry before r4-r11 saved
189 asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+20*4)));
190 asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt1)); // interrupt before return address was saved or after registers restored
191 asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt2)); // interrupt after return address saved
194 asm("__irq_resched_return: ");
195 asm(".word irq_resched_return "); // literal: IRQ reschedule resume address
198 #endif // __USER_CONTEXT_TYPE_MACHINE_CODED__
// Stores the banked user-mode SP (r13_usr) and LR (r14_usr) into the two-word
// buffer pointed to by the argument (r0). Must be called from a privileged mode.
200 __NAKED__ void Arm::GetUserSpAndLr(TAny*)
202 asm("stmia r0, {r13, r14}^ "); // '^' selects the user-mode banked registers
203 asm("mov r0, r0"); // NOP needed between stm^ and banked register access
// Loads the banked user-mode SP (r13_usr) and LR (r14_usr) from the two-word
// buffer pointed to by the argument (r0). Must be called from a privileged mode.
207 __NAKED__ void Arm::SetUserSpAndLr(TAny*)
209 asm("ldmia r0, {r13, r14}^ "); // '^' selects the user-mode banked registers
210 asm("mov r0, r0"); // NOP needed between ldm^ and banked register access
214 #ifdef __CPU_ARM_USE_DOMAINS
// Returns the CP15 Domain Access Control Register (c3) in r0.
// NOTE(review): the return instruction is missing from this capture.
215 __NAKED__ TUint32 Arm::Dacr()
217 asm("mrc p15, 0, r0, c3, c0, 0 "); // r0 = DACR
// Writes the argument (r0) to the CP15 Domain Access Control Register (c3).
// NOTE(review): the return instruction is missing from this capture.
221 __NAKED__ void Arm::SetDacr(TUint32)
223 asm("mcr p15, 0, r0, c3, c0, 0 "); // DACR = r0
// Read-modify-write of the CP15 Domain Access Control Register:
// new DACR = (old & ~aClearMask) | aSetMask (r0 = clear mask, r1 = set mask).
// NOTE(review): the instruction returning the original value is missing from
// this capture.
228 __NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32)
230 asm("mrc p15, 0, r2, c3, c0, 0 "); // r2 = current DACR
231 asm("bic r2, r2, r0 "); // clear requested bits
232 asm("orr r2, r2, r1 "); // set requested bits
233 asm("mcr p15, 0, r2, c3, c0, 0 "); // write back modified DACR
240 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
// Sets the CPU's coprocessor access register (CAR). Only compiled when the
// CPU has one. NOTE(review): the function body is missing from this capture.
241 __NAKED__ void Arm::SetCar(TUint32)
251 /** Get the CPU's coprocessor access register value
253 @return The value of the CAR, 0 if CPU doesn't have CAR
// Returns the CPU's coprocessor access register (CAR) value, or 0 if the CPU
// has no CAR. NOTE(review): the function body for both #ifdef branches is
// missing from this capture.
258 EXPORT_C __NAKED__ TUint32 Arm::Car()
260 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
270 /** Modify the CPU's coprocessor access register value
271 Does nothing if CPU does not have CAR.
273 @param aClearMask Mask of bits to clear (1 = clear this bit)
274 @param aSetMask Mask of bits to set (1 = set this bit)
275 @return The original value of the CAR, 0 if CPU doesn't have CAR
// Read-modify-write of the CAR: new CAR = (old & ~aClearMask) | aSetMask.
// Returns the original value; does nothing (returns 0) if the CPU has no CAR.
// NOTE(review): the instructions reading the CAR into r2 and writing it back
// are missing from this capture; only the mask arithmetic is visible.
280 EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
282 #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG
284 asm("bic r0, r2, r0 "); // r0 = old CAR (r2, presumably) with clear-mask bits removed
285 asm("orr r0, r0, r1 "); // apply set mask
// Writes the argument (r0) to the VFP FPEXC register.
// On ARM1136 without the erratum-351912 fix, toggling VFP must be paired with
// toggling dynamic branch prediction: disable DB while VFP is enabled, and
// restore DB (= return-stack bit RS) when VFP is disabled.
// NOTE(review): the cpsid/interrupt-disable line usually paired with the
// "mrs r3, cpsr"/"msr cpsr, r3" save/restore is missing from this capture.
296 __NAKED__ void Arm::SetFpExc(TUint32)
298 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
299 // If we are about to enable VFP, disable dynamic branch prediction
300 // If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
301 asm("mrs r3, cpsr "); // save CPSR so it can be restored below
303 asm("mrc p15, 0, r1, c1, c0, 1 "); // r1 = auxiliary control register
304 asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // enabling or disabling VFP?
305 asm("bic r1, r1, #2 "); // clear DB bit (disable dynamic prediction)
306 asm("and r2, r1, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled)
307 asm("orreq r1, r1, r2, lsl #1 "); // if VFP is being disabled set DB = RS
308 asm("mcr p15, 0, r1, c1, c0, 1 "); // write back auxiliary control register
309 asm("mcr p15, 0, r2, c7, c5, 6 "); // flush BTAC
310 VFP_FMXR(,VFP_XREG_FPEXC,0); // FPEXC = r0
311 asm("msr cpsr, r3 "); // restore CPSR
// Non-erratum path: plain FPEXC write.
314 VFP_FMXR(,VFP_XREG_FPEXC,0);
322 /** Get the value of the VFP FPEXC register
324 @return The value of FPEXC, 0 if there is no VFP
// Returns the VFP FPEXC register value (0 if there is no VFP).
// NOTE(review): the #ifdef guard and return instruction are missing from this
// capture.
329 EXPORT_C __NAKED__ TUint32 Arm::FpExc()
332 VFP_FMRX(,0,VFP_XREG_FPEXC); // r0 = FPEXC
341 /** Modify the VFP FPEXC register
342 Does nothing if there is no VFP
344 @param aClearMask Mask of bits to clear (1 = clear this bit)
345 @param aSetMask Mask of bits to set (1 = set this bit)
346 @return The original value of FPEXC, 0 if no VFP present
// Read-modify-write of the VFP FPEXC register:
// new FPEXC = (old & ~aClearMask) | aSetMask; returns the original value
// (0 if there is no VFP). Same ARM1136 erratum-351912 branch-prediction
// workaround as Arm::SetFpExc() — see comments there.
// NOTE(review): the instruction(s) returning the original r12 value and the
// interrupt-disable around the erratum sequence are missing from this capture.
351 EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
354 VFP_FMRX(,12,VFP_XREG_FPEXC); // r12 = current FPEXC (original value)
355 asm("bic r0, r12, r0 "); // clear requested bits
356 asm("orr r0, r0, r1 "); // set requested bits; r0 = new FPEXC
358 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
359 // If we are about to enable VFP, disable dynamic branch prediction
360 // If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
361 asm("mrs r3, cpsr "); // save CPSR
363 asm("mrc p15, 0, r1, c1, c0, 1 "); // r1 = auxiliary control register
364 asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); // enabling or disabling VFP?
365 asm("bic r1, r1, #2 "); // clear DB bit (disable dynamic prediction)
366 asm("and r2, r1, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled)
367 asm("orreq r1, r1, r2, lsl #1 "); // if VFP is being disabled set DB = RS
368 asm("mcr p15, 0, r1, c1, c0, 1 "); // write back auxiliary control register
369 asm("mcr p15, 0, r2, c7, c5, 6 "); // flush BTAC
370 VFP_FMXR(,VFP_XREG_FPEXC,0); // FPEXC = r0
371 asm("msr cpsr, r3 "); // restore CPSR
// Non-erratum path: plain FPEXC write.
373 VFP_FMXR(,VFP_XREG_FPEXC,0);
374 #endif // erratum 351912
383 /** Get the value of the VFP FPSCR register
385 @return The value of FPSCR, 0 if there is no VFP
// Returns the VFP FPSCR register value (0 if there is no VFP).
// NOTE(review): the #ifdef guard and return instruction are missing from this
// capture.
390 EXPORT_C __NAKED__ TUint32 Arm::FpScr()
393 VFP_FMRX(,0,VFP_XREG_FPSCR); // r0 = FPSCR
402 /** Modify the VFP FPSCR register
403 Does nothing if there is no VFP
405 @param aClearMask Mask of bits to clear (1 = clear this bit)
406 @param aSetMask Mask of bits to set (1 = set this bit)
407 @return The original value of FPSCR, 0 if no VFP present
// Read-modify-write of the VFP FPSCR register:
// new FPSCR = (old & ~aClearMask) | aSetMask; returns the original value
// (0 if there is no VFP). NOTE(review): the instruction moving the original
// value (r2) into r0 for return is missing from this capture.
412 EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
415 VFP_FMRX(,2,VFP_XREG_FPSCR); // r2 = current FPSCR (original value)
416 asm("bic r0, r2, r0 "); // clear requested bits
417 asm("orr r0, r0, r1 "); // set requested bits; r0 = new FPSCR
418 VFP_FMXR(,VFP_XREG_FPSCR,0); // write back modified FPSCR
427 /** Detect whether NEON is present
429 @return ETrue if present, EFalse if not
434 #if defined(__CPU_HAS_VFP) && defined(__VFP_V3)
// Detects Advanced SIMD (NEON) support: temporarily enables VFP, reads MVFR0
// to check for the full 32-register SIMD bank, checks CPACR for ASIMD/D32
// disable bits, then restores the saved FPEXC. Returns ETrue (1) if NEON is
// usable, EFalse (0) otherwise.
// NOTE(review): the CPACR read into r2 before line 446 and the "0:" local
// label / return are missing from this capture.
435 __NAKED__ TBool Arm::NeonPresent()
437 asm("mov r0, #0 "); // Not present
438 VFP_FMRX(, 1,VFP_XREG_FPEXC); // Save VFP state
439 asm("orr r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN));
440 VFP_FMXR(, VFP_XREG_FPEXC,1); // Enable VFP
442 VFP_FMRX(, 2,VFP_XREG_MVFR0); // Read MVFR0
443 asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32)); // Check to see if all 32 Advanced SIMD registers are present
444 asm("beq 0f "); // Skip ahead if not
446 asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS)); // Check to see if ASIMD is disabled
447 asm("bne 0f "); // Skip ahead if so
448 asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS)); // Check to see if the upper 16 registers are disabled
449 asm("moveq r0, #1" ); // If not then report NEON present
452 VFP_FMXR(,VFP_XREG_FPEXC,1); // Restore VFP state
// Returns the M bit (bit 0) of the CP15 control register (c1): non-zero when
// the MMU is enabled. NOTE(review): the return instruction is missing from
// this capture.
459 __NAKED__ TBool Arm::MmuActive()
461 asm("mrc p15, 0, r0, c1, c0, 0 "); // r0 = CP15 control register
462 asm("and r0, r0, #1 "); // isolate the MMU-enable bit
466 // Returns the content of Translate Table Base Register 0.
467 // To get physical address of the level 1 table, on some platforms this must be orred with 0xffff8000 (to get rid of table walk cache attributes)
// Returns the raw content of Translation Table Base Register 0 (CP15 c2);
// see the note above about masking off table-walk cache attribute bits to get
// the physical level-1 table address. NOTE(review): the return instruction is
// missing from this capture.
468 __NAKED__ TUint32 Arm::MmuTTBR0()
470 asm("mrc p15, 0, r0, c2, c0, 0 "); // r0 = TTBR0
477 /** Get the current value of the high performance counter.
479 If a high performance counter is not available, this uses the millisecond
// Returns the current fast-counter value. With a high-resolution timer this is
// read directly in assembler via GET_HIGH_RES_TICK_COUNT; otherwise a C++
// fallback (body not visible in this capture — presumably the millisecond tick)
// is used. NOTE(review): the #else and both function bodies' braces/returns
// are missing from this capture.
483 EXPORT_C __NAKED__ TUint32 NKern::FastCounter()
485 GET_HIGH_RES_TICK_COUNT(R0); // r0 = high-resolution tick count
489 EXPORT_C TUint32 NKern::FastCounter()
497 /** Get the frequency of counter queried by NKern::FastCounter().
// Returns the frequency (in Hz) of the counter read by NKern::FastCounter():
// the fixed high-resolution timer frequency when available, otherwise derived
// from the nanokernel tick period. NOTE(review): the #else/#endif and closing
// brace are missing from this capture.
499 EXPORT_C TInt NKern::FastCounterFrequency()
501 #ifdef HAS_HIGH_RES_TIMER
502 return KHighResTimerFrequency;
504 return 1000000 / NKern::TickPeriod(); // TickPeriod() is in microseconds