sl@0: // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\nkern\arm\ncutilf.cia sl@0: // sl@0: // sl@0: sl@0: #include sl@0: #include sl@0: #include "highrestimer.h" sl@0: sl@0: #ifdef __SCHEDULER_MACHINE_CODED__ sl@0: /** Signals the request semaphore of a nanothread. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality sl@0: layers. Device drivers should use Kern::RequestComplete instead. sl@0: sl@0: @param aThread Nanothread to signal. Must be non NULL. sl@0: sl@0: @see Kern::RequestComplete() sl@0: sl@0: @pre Interrupts must be enabled. sl@0: @pre Do not call from an ISR. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/) sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR); sl@0: sl@0: asm("ldr r2, __TheScheduler "); sl@0: asm("str lr, [sp, #-4]! "); sl@0: asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore)); sl@0: asm("add r3, r3, #1 "); sl@0: asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("bl " CSM_ZN14NFastSemaphore6SignalEv); // alignment OK since target is also assembler sl@0: asm("ldr lr, [sp], #4 "); sl@0: asm("b " CSM_ZN5NKern6UnlockEv); sl@0: } sl@0: sl@0: sl@0: /** Atomically signals the request semaphore of a nanothread and a fast mutex. sl@0: sl@0: This function is intended to be used by the EPOC layer and personality sl@0: layers. Device drivers should use Kern::RequestComplete instead. sl@0: sl@0: @param aThread Nanothread to signal. Must be non NULL. sl@0: @param aMutex Fast mutex to signal. If NULL, the system lock is signaled. sl@0: sl@0: @see Kern::RequestComplete() sl@0: sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: @pre Interrupts must be enabled. sl@0: */ sl@0: EXPORT_C __NAKED__ void NKern::ThreadRequestSignal(NThread* /*aThread*/, NFastMutex* /*aMutex*/) sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_INTERRUPTS_ENABLED|MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC); sl@0: sl@0: asm("ldr r2, __TheScheduler "); sl@0: asm("cmp r1, #0 "); sl@0: asm("ldreq r1, __SystemLock "); sl@0: asm("ldr r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("stmfd sp!, {r1,lr} "); sl@0: asm("add r0, r0, #%a0" : : "i" _FOFF(NThread,iRequestSemaphore)); sl@0: asm("add r3, r3, #1 "); sl@0: asm("str r3, [r2, #%a0]" : : "i" _FOFF(TScheduler,iKernCSLocked)); sl@0: asm("bl " CSM_ZN14NFastSemaphore6SignalEv); sl@0: asm("ldr r0, [sp], #4 "); sl@0: asm("bl " CSM_ZN10NFastMutex6SignalEv); // alignment OK since target is also assembler sl@0: asm("ldr lr, [sp], #4 "); sl@0: asm("b " CSM_ZN5NKern6UnlockEv); sl@0: sl@0: asm("__SystemLock: "); sl@0: asm(".word %a0" : : "i" ((TInt)&TheScheduler.iLock)); sl@0: asm("__TheScheduler: "); sl@0: asm(".word TheScheduler "); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: #ifndef __USER_CONTEXT_TYPE_MACHINE_CODED__ sl@0: // called by C++ version of NThread::UserContextType() sl@0: __NAKED__ TBool RescheduledAfterInterrupt(TUint32 /*aAddr*/) sl@0: { sl@0: asm("ldr r1, __irq_resched_return "); sl@0: asm("cmp r0, r1 "); sl@0: asm("movne r0, #0 "); sl@0: __JUMP(,lr); sl@0: asm("__irq_resched_return: "); sl@0: asm(".word irq_resched_return "); sl@0: } sl@0: sl@0: #else sl@0: sl@0: /** Get a value which indicates where a thread's user mode context is stored. sl@0: sl@0: @return A value that can be used as an index into the tables returned by sl@0: NThread::UserContextTables(). sl@0: sl@0: @pre any context sl@0: @pre kernel locked sl@0: @post kernel locked sl@0: sl@0: @see UserContextTables sl@0: @publishedPartner sl@0: */ sl@0: EXPORT_C __NAKED__ NThread::TUserContextType NThread::UserContextType() sl@0: { sl@0: ASM_CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR); sl@0: // sl@0: // Optimisation note: It may be possible to coalesce the first and second sl@0: // checks below by creating separate "EContextXxxDied" context types for each sl@0: // possible way a thread can die and ordering these new types before sl@0: // EContextException. sl@0: // sl@0: sl@0: // Dying thread? use context saved earlier by kernel sl@0: sl@0: asm("ldr r3, [r0, #%a0]" : : "i" _FOFF(NThread,iCsFunction)); sl@0: asm("ldrb r2, [r0, #%a0]" : : "i" _FOFF(NThread,iSpare3)); // r2 = iUserContextType sl@0: asm("mov r1, r0 "); // r1 = this sl@0: asm("cmp r3, #%a0" : : "i" ((TInt)NThread::ECSExitInProgress)); sl@0: asm("moveq r0, r2"); sl@0: __JUMP(eq,lr); sl@0: sl@0: // Exception or no user context? sl@0: sl@0: asm("ldr r3, __TheScheduler"); sl@0: asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextException)); sl@0: asm("ldr r3, [r3, #%a0]" : : "i" _FOFF(TScheduler,iCurrentThread)); sl@0: asm("movls r0, r2 "); // Return EContextNone or EContextException sl@0: __JUMP(ls,lr); sl@0: asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextUserIntrCallback)); sl@0: asm("blo 1f"); sl@0: asm("cmp r2, #%a0 " : : "i" ((TInt)NThread::EContextWFARCallback)); sl@0: asm("movls r0, r2 "); // Return EContextUserIntrCallback or EContextWFARCallback sl@0: __JUMP(ls,lr); sl@0: sl@0: // Getting current thread context? must be in exec call as exception sl@0: // and dying thread cases were tested above. sl@0: sl@0: asm("1: "); sl@0: asm("cmp r3, r1"); sl@0: asm("moveq r0, #%a0" : : "i" ((TInt)NThread::EContextExec)); sl@0: __JUMP(eq,lr); sl@0: sl@0: asm("ldr r0, [r1, #%a0]" : : "i" _FOFF(NThread,iStackBase)); sl@0: asm("ldr r2, [r1, #%a0]" : : "i" _FOFF(NThread,iStackSize)); sl@0: asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(NThread,iSavedSP)); sl@0: asm("add r2, r2, r0"); sl@0: asm("ldr r0, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+11*4)); // get saved return address from reschedule sl@0: asm("ldr r12, __irq_resched_return "); sl@0: asm("sub r2, r2, r3"); sl@0: asm("cmp r0, r12 "); sl@0: asm("beq preempted "); sl@0: sl@0: // Transition to supervisor mode must have been due to a SWI sl@0: sl@0: asm("not_preempted:"); sl@0: asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+15*4))); sl@0: asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextWFAR)); // thread must have blocked doing Exec::WaitForAnyRequest sl@0: asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextExec)); // Thread must have been in a SLOW or UNPROTECTED Exec call sl@0: __JUMP(,lr); sl@0: sl@0: // thread was preempted due to an interrupt sl@0: // interrupt and reschedule will have pushed ? words + USER_MEMORY_GUARD_SAVE_WORDS + EXTRA_STACK_SPACE onto the stack sl@0: sl@0: asm("preempted:"); sl@0: asm("ldr r12, [r3, #%a0]" : : "i" (EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+12*4)); // first word on stack before reschedule sl@0: asm("mov r0, #%a0 " : : "i" ((TInt)NThread::EContextUserInterrupt)); sl@0: asm("and r12, r12, #0x1f "); sl@0: asm("cmp r12, #0x10 "); // interrupted mode = user? sl@0: __JUMP(eq,lr); sl@0: sl@0: asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+30*4))); sl@0: asm("bcs not_preempted "); // thread was interrupted in supervisor mode, return address and r4-r11 were saved sl@0: sl@0: // interrupt occurred in exec call entry before r4-r11 saved sl@0: asm("cmp r2, #%a0 " : : "i" ((TInt)(EXTRA_STACK_SPACE+USER_MEMORY_GUARD_SAVE_WORDS*4+20*4))); sl@0: asm("moveq r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt1)); // interrupt before return address was saved or after registers restored sl@0: asm("movne r0, #%a0 " : : "i" ((TInt)NThread::EContextSvsrInterrupt2)); // interrupt after return address saved sl@0: __JUMP(,lr); sl@0: sl@0: asm("__irq_resched_return: "); sl@0: asm(".word irq_resched_return "); sl@0: } sl@0: sl@0: #endif // __USER_CONTEXT_TYPE_MACHINE_CODED__ sl@0: sl@0: __NAKED__ void Arm::GetUserSpAndLr(TAny*) sl@0: { sl@0: asm("stmia r0, {r13, r14}^ "); sl@0: asm("mov r0, r0"); // NOP needed between stm^ and banked register access sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: __NAKED__ void Arm::SetUserSpAndLr(TAny*) sl@0: { sl@0: asm("ldmia r0, {r13, r14}^ "); sl@0: asm("mov r0, r0"); // NOP needed between ldm^ and banked register access sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #ifdef __CPU_ARM_USE_DOMAINS sl@0: __NAKED__ TUint32 Arm::Dacr() sl@0: { sl@0: asm("mrc p15, 0, r0, c3, c0, 0 "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: __NAKED__ void Arm::SetDacr(TUint32) sl@0: { sl@0: asm("mcr p15, 0, r0, c3, c0, 0 "); sl@0: CPWAIT(,r0); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: __NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32) sl@0: { sl@0: asm("mrc p15, 0, r2, c3, c0, 0 "); sl@0: asm("bic r2, r2, r0 "); sl@0: asm("orr r2, r2, r1 "); sl@0: asm("mcr p15, 0, r2, c3, c0, 0 "); sl@0: CPWAIT(,r0); sl@0: asm("mov r0, r2 "); sl@0: __JUMP(,lr); sl@0: } sl@0: #endif sl@0: sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: __NAKED__ void Arm::SetCar(TUint32) sl@0: { sl@0: SET_CAR(,r0); sl@0: CPWAIT(,r0); sl@0: __JUMP(,lr); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: sl@0: /** Get the CPU's coprocessor access register value sl@0: sl@0: @return The value of the CAR, 0 if CPU doesn't have CAR sl@0: sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: EXPORT_C __NAKED__ TUint32 Arm::Car() sl@0: { sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: GET_CAR(,r0); sl@0: #else sl@0: asm("mov r0, #0 "); sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: sl@0: /** Modify the CPU's coprocessor access register value sl@0: Does nothing if CPU does not have CAR. sl@0: sl@0: @param aClearMask Mask of bits to clear (1 = clear this bit) sl@0: @param aSetMask Mask of bits to set (1 = set this bit) sl@0: @return The original value of the CAR, 0 if CPU doesn't have CAR sl@0: sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/) sl@0: { sl@0: #ifdef __CPU_HAS_COPROCESSOR_ACCESS_REG sl@0: GET_CAR(,r2); sl@0: asm("bic r0, r2, r0 "); sl@0: asm("orr r0, r0, r1 "); sl@0: SET_CAR(,r0); sl@0: CPWAIT(,r0); sl@0: asm("mov r0, r2 "); sl@0: #else sl@0: asm("mov r0, #0 "); sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #ifdef __CPU_HAS_VFP sl@0: __NAKED__ void Arm::SetFpExc(TUint32) sl@0: { sl@0: #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED) sl@0: // If we are about to enable VFP, disable dynamic branch prediction sl@0: // If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled sl@0: asm("mrs r3, cpsr "); sl@0: CPSIDAIF; sl@0: asm("mrc p15, 0, r1, c1, c0, 1 "); sl@0: asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); sl@0: asm("bic r1, r1, #2 "); // clear DB bit (disable dynamic prediction) sl@0: asm("and r2, r1, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled) sl@0: asm("orreq r1, r1, r2, lsl #1 "); // if VFP is being disabled set DB = RS sl@0: asm("mcr p15, 0, r1, c1, c0, 1 "); sl@0: asm("mcr p15, 0, r2, c7, c5, 6 "); // flush BTAC sl@0: VFP_FMXR(,VFP_XREG_FPEXC,0); sl@0: asm("msr cpsr, r3 "); sl@0: __JUMP(,lr); sl@0: #else sl@0: VFP_FMXR(,VFP_XREG_FPEXC,0); sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: #endif sl@0: sl@0: sl@0: sl@0: /** Get the value of the VFP FPEXC register sl@0: sl@0: @return The value of FPEXC, 0 if there is no VFP sl@0: sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: EXPORT_C __NAKED__ TUint32 Arm::FpExc() sl@0: { sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMRX(,0,VFP_XREG_FPEXC); sl@0: #else sl@0: asm("mov r0, #0 "); sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: sl@0: /** Modify the VFP FPEXC register sl@0: Does nothing if there is no VFP sl@0: sl@0: @param aClearMask Mask of bits to clear (1 = clear this bit) sl@0: @param aSetMask Mask of bits to set (1 = set this bit) sl@0: @return The original value of FPEXC, 0 if no VFP present sl@0: sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/) sl@0: { sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMRX(,12,VFP_XREG_FPEXC); sl@0: asm("bic r0, r12, r0 "); sl@0: asm("orr r0, r0, r1 "); sl@0: sl@0: #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED) sl@0: // If we are about to enable VFP, disable dynamic branch prediction sl@0: // If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled sl@0: asm("mrs r3, cpsr "); sl@0: CPSIDAIF; sl@0: asm("mrc p15, 0, r1, c1, c0, 1 "); sl@0: asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) ); sl@0: asm("bic r1, r1, #2 "); // clear DB bit (disable dynamic prediction) sl@0: asm("and r2, r1, #1 "); // r2 bit 0 = RS bit (1 if return stack enabled) sl@0: asm("orreq r1, r1, r2, lsl #1 "); // if VFP is being disabled set DB = RS sl@0: asm("mcr p15, 0, r1, c1, c0, 1 "); sl@0: asm("mcr p15, 0, r2, c7, c5, 6 "); // flush BTAC sl@0: VFP_FMXR(,VFP_XREG_FPEXC,0); sl@0: asm("msr cpsr, r3 "); sl@0: #else sl@0: VFP_FMXR(,VFP_XREG_FPEXC,0); sl@0: #endif // erratum 351912 sl@0: sl@0: asm("mov r0, r12 "); sl@0: #else // no vfp sl@0: asm("mov r0, #0 "); sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: /** Get the value of the VFP FPSCR register sl@0: sl@0: @return The value of FPSCR, 0 if there is no VFP sl@0: sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: EXPORT_C __NAKED__ TUint32 Arm::FpScr() sl@0: { sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMRX(,0,VFP_XREG_FPSCR); sl@0: #else sl@0: asm("mov r0, #0 "); sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: sl@0: /** Modify the VFP FPSCR register sl@0: Does nothing if there is no VFP sl@0: sl@0: @param aClearMask Mask of bits to clear (1 = clear this bit) sl@0: @param aSetMask Mask of bits to set (1 = set this bit) sl@0: @return The original value of FPSCR, 0 if no VFP present sl@0: sl@0: @publishedPartner sl@0: @released sl@0: */ sl@0: EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/) sl@0: { sl@0: #ifdef __CPU_HAS_VFP sl@0: VFP_FMRX(,2,VFP_XREG_FPSCR); sl@0: asm("bic r0, r2, r0 "); sl@0: asm("orr r0, r0, r1 "); sl@0: VFP_FMXR(,VFP_XREG_FPSCR,0); sl@0: asm("mov r0, r2 "); sl@0: #else sl@0: asm("mov r0, #0 "); sl@0: #endif sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: /** Detect whether NEON is present sl@0: sl@0: @return ETrue if present, EFalse if not sl@0: sl@0: @internalTechnology sl@0: @released sl@0: */ sl@0: #if defined(__CPU_HAS_VFP) && defined(__VFP_V3) sl@0: __NAKED__ TBool Arm::NeonPresent() sl@0: { sl@0: asm("mov r0, #0 "); // Not present sl@0: VFP_FMRX(, 1,VFP_XREG_FPEXC); // Save VFP state sl@0: asm("orr r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN)); sl@0: VFP_FMXR(, VFP_XREG_FPEXC,1); // Enable VFP sl@0: sl@0: VFP_FMRX(, 2,VFP_XREG_MVFR0); // Read MVFR0 sl@0: asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32)); // Check to see if all 32 Advanced SIMD registers are present sl@0: asm("beq 0f "); // Skip ahead if not sl@0: GET_CAR(, r2); sl@0: asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS)); // Check to see if ASIMD is disabled sl@0: asm("bne 0f "); // Skip ahead if so sl@0: asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS)); // Check to see if the upper 16 registers are disabled sl@0: asm("moveq r0, #1" ); // If not then eport NEON present sl@0: sl@0: asm("0: "); sl@0: VFP_FMXR(,VFP_XREG_FPEXC,1); // Restore VFP state sl@0: __JUMP(, lr); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: #ifdef __CPU_HAS_MMU sl@0: __NAKED__ TBool Arm::MmuActive() sl@0: { sl@0: asm("mrc p15, 0, r0, c1, c0, 0 "); sl@0: asm("and r0, r0, #1 "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: // Returns the content of Translate Table Base Register 0. sl@0: // To get physical address of the level 1 table, on some platforms this must be orred with 0xffff8000 (to get rid of table walk cache attributes) sl@0: __NAKED__ TUint32 Arm::MmuTTBR0() sl@0: { sl@0: asm("mrc p15, 0, r0, c2, c0, 0 "); sl@0: __JUMP(,lr); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: sl@0: /** Get the current value of the high performance counter. sl@0: sl@0: If a high performance counter is not available, this uses the millisecond sl@0: tick count instead. sl@0: */ sl@0: #ifdef HAS_HIGH_RES_TIMER sl@0: EXPORT_C __NAKED__ TUint32 NKern::FastCounter() sl@0: { sl@0: GET_HIGH_RES_TICK_COUNT(R0); sl@0: __JUMP(,lr); sl@0: } sl@0: #else sl@0: EXPORT_C TUint32 NKern::FastCounter() sl@0: { sl@0: return NTickCount(); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: sl@0: /** Get the frequency of counter queried by NKern::FastCounter(). sl@0: */ sl@0: EXPORT_C TInt NKern::FastCounterFrequency() sl@0: { sl@0: #ifdef HAS_HIGH_RES_TIMER sl@0: return KHighResTimerFrequency; sl@0: #else sl@0: return 1000000 / NKern::TickPeriod(); sl@0: #endif sl@0: } sl@0: