// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\arm\ncutilf.cia
//
//

#include
#include
#include
#include


__NAKED__ void Arm::GetUserSpAndLr(TAny*)
	{
	asm("stmia r0, {r13, r14}^ ");
	asm("mov r0, r0");	// NOP needed between stm^ and banked register access
	__JUMP(, lr);
	}

__NAKED__ void Arm::SetUserSpAndLr(TAny*)
	{
	asm("ldmia r0, {r13, r14}^ ");
	asm("mov r0, r0");	// NOP needed between ldm^ and banked register access
	__JUMP(, lr);
	}

__NAKED__ TUint32 Arm::Dacr()
	{
	asm("mrc p15, 0, r0, c3, c0, 0 ");
	__JUMP(, lr);
	}

__NAKED__ void Arm::SetDacr(TUint32)
	{
	asm("mcr p15, 0, r0, c3, c0, 0 ");
	__INST_SYNC_BARRIER_Z__(r1);
	__JUMP(, lr);
	}

__NAKED__ TUint32 Arm::ModifyDacr(TUint32, TUint32)
	{
	asm("mrc p15, 0, r2, c3, c0, 0 ");
	asm("bic r2, r2, r0 ");
	asm("orr r2, r2, r1 ");
	asm("mcr p15, 0, r2, c3, c0, 0 ");
	__INST_SYNC_BARRIER_Z__(r3);
	asm("mov r0, r2 ");
	__JUMP(, lr);
	}

__NAKED__ void Arm::SetCar(TUint32)
	{
	SET_CAR(, r0);
	__JUMP(, lr);
	}


/** Get the CPU's coprocessor access register value

@return The value of the CAR, 0 if CPU doesn't have CAR

@publishedPartner
@released
*/
EXPORT_C __NAKED__ TUint32 Arm::Car()
	{
	GET_CAR(, r0);
	__JUMP(, lr);
	}


/** Modify the CPU's coprocessor access register value
	Does nothing if CPU does not have CAR.

@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
@param	aSetMask	Mask of bits to set		(1 = set this bit)
@return The original value of the CAR, 0 if CPU doesn't have CAR

@publishedPartner
@released
*/
EXPORT_C __NAKED__ TUint32 Arm::ModifyCar(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
	{
	GET_CAR(, r2);
	asm("bic r0, r2, r0 ");
	asm("orr r0, r0, r1 ");
	SET_CAR(, r0);
	asm("mov r0, r2 ");
	__JUMP(, lr);
	}
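/* Illustrative usage sketch (not from the original source): the Modify* helpers
 * above all follow the same read-modify-write pattern, equivalent in C to
 *
 *	TUint32 old = reg;
 *	reg = (old & ~aClearMask) | aSetMask;
 *	return old;
 *
 * so a caller wanting full access to coprocessors CP10/CP11 (the VFP/NEON
 * register file) might write something like the following.  The mask value is
 * an assumption based on the architectural CPACR layout (cp10/cp11 access
 * fields in bits 20-23) and is not a constant defined in this file:
 *
 *	const TUint32 KCpacrCp10Cp11Full = 0x00F00000;	// assumed CPACR field layout
 *	TUint32 oldCar = Arm::ModifyCar(KCpacrCp10Cp11Full, KCpacrCp10Cp11Full);
 */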

#ifdef __CPU_HAS_VFP
__NAKED__ void Arm::SetFpExc(TUint32)
	{
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
// If we are about to enable VFP, disable dynamic branch prediction
// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
	asm("mrs r3, cpsr ");
	__ASM_CLI();
	asm("mrc p15, 0, r1, c1, c0, 1 ");
	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
	asm("bic r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
	asm("and r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
	asm("mcr p15, 0, r1, c1, c0, 1 ");
	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
	VFP_FMXR(, VFP_XREG_FPEXC,0);
	__INST_SYNC_BARRIER_Z__(r12);
	asm("msr cpsr, r3 ");
	__JUMP(, lr);
#else
	VFP_FMXR(, VFP_XREG_FPEXC,0);
	__JUMP(, lr);
#endif
	}
#endif


/** Get the value of the VFP FPEXC register

@return The value of FPEXC, 0 if there is no VFP

@publishedPartner
@released
*/
EXPORT_C __NAKED__ TUint32 Arm::FpExc()
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(, 0,VFP_XREG_FPEXC);
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(, lr);
	}


/** Modify the VFP FPEXC register
	Does nothing if there is no VFP.

@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
@param	aSetMask	Mask of bits to set		(1 = set this bit)
@return The original value of FPEXC, 0 if no VFP present

@publishedPartner
@released
*/
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpExc(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(, 12,VFP_XREG_FPEXC);
	asm("bic r0, r12, r0 ");
	asm("orr r0, r0, r1 ");

#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_351912_FIXED)
// If we are about to enable VFP, disable dynamic branch prediction
// If we are about to disable VFP, enable dynamic branch prediction if return stack prediction is enabled
	asm("mrs r3, cpsr ");
	__ASM_CLI();
	asm("mrc p15, 0, r1, c1, c0, 1 ");
	asm("tst r0, #%a0" : : "i" ((TInt)VFP_FPEXC_EN) );
	asm("bic r1, r1, #2 ");				// clear DB bit (disable dynamic prediction)
	asm("and r2, r1, #1 ");				// r2 bit 0 = RS bit (1 if return stack enabled)
	asm("orreq r1, r1, r2, lsl #1 ");	// if VFP is being disabled set DB = RS
	asm("mcr p15, 0, r1, c1, c0, 1 ");
	asm("mcr p15, 0, r2, c7, c5, 6 ");	// flush BTAC
	VFP_FMXR(, VFP_XREG_FPEXC,0);
	__INST_SYNC_BARRIER_Z__(r12);
	asm("msr cpsr, r3 ");
#else
	VFP_FMXR(, VFP_XREG_FPEXC,0);
#endif	// erratum 351912

	asm("mov r0, r12 ");
#else	// no vfp
	asm("mov r0, #0 ");
#endif
	__JUMP(, lr);
	}

/** Get the value of the VFP FPSCR register

@return The value of FPSCR, 0 if there is no VFP

@publishedPartner
@released
*/
EXPORT_C __NAKED__ TUint32 Arm::FpScr()
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(, 0,VFP_XREG_FPSCR);
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(, lr);
	}


/** Modify the VFP FPSCR register
	Does nothing if there is no VFP.

@param	aClearMask	Mask of bits to clear	(1 = clear this bit)
@param	aSetMask	Mask of bits to set		(1 = set this bit)
@return The original value of FPSCR, 0 if no VFP present

@publishedPartner
@released
*/
EXPORT_C __NAKED__ TUint32 Arm::ModifyFpScr(TUint32 /*aClearMask*/, TUint32 /*aSetMask*/)
	{
#ifdef __CPU_HAS_VFP
	VFP_FMRX(, 2,VFP_XREG_FPSCR);
	asm("bic r0, r2, r0 ");
	asm("orr r0, r0, r1 ");
	VFP_FMXR(, VFP_XREG_FPSCR,0);
	asm("mov r0, r2 ");
#else
	asm("mov r0, #0 ");
#endif
	__JUMP(, lr);
	}
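/* Illustrative usage sketch (not from the original source) for Arm::ModifyFpScr().
 * Assuming the architectural VFP FPSCR layout (RMode field in bits 23:22), a
 * caller temporarily switching to round-towards-zero might write:
 *
 *	const TUint32 KFpScrRModeMask = 0x00C00000;		// FPSCR.RMode field (assumed)
 *	const TUint32 KFpScrRoundToZero = 0x00C00000;	// RMode = 0b11 (assumed)
 *	TUint32 oldFpScr = Arm::ModifyFpScr(KFpScrRModeMask, KFpScrRoundToZero);
 *	// ... round-towards-zero arithmetic ...
 *	Arm::ModifyFpScr(KFpScrRModeMask, oldFpScr & KFpScrRModeMask);	// restore
 *
 * The constants are illustrative only and are not defined in this file.
 */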

/** Detect whether NEON is present

@return ETrue if present, EFalse if not

@internalTechnology
@released
*/
#if defined(__CPU_HAS_VFP) && defined(__VFP_V3)
__NAKED__ TBool Arm::NeonPresent()
	{
	asm("mov r0, #0 ");				// Not present
	VFP_FMRX(, 1,VFP_XREG_FPEXC);	// Save VFP state
	asm("orr r2, r1, #%a0" : : "i" ((TInt)VFP_FPEXC_EN));
	VFP_FMXR(, VFP_XREG_FPEXC,1);	// Enable VFP

	VFP_FMRX(, 2,VFP_XREG_MVFR0);	// Read MVFR0
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_MVFR0_ASIMD32));	// Check to see if all 32 Advanced SIMD registers are present
	asm("beq 0f ");					// Skip ahead if not
	GET_CAR(, r2);
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_ASEDIS));	// Check to see if ASIMD is disabled
	asm("bne 0f ");					// Skip ahead if so
	asm("tst r2, #%a0" : : "i" ((TInt)VFP_CPACR_D32DIS));	// Check to see if the upper 16 registers are disabled
	asm("moveq r0, #1" );			// If not then report NEON present

	asm("0: ");
	VFP_FMXR(,VFP_XREG_FPEXC,1);	// Restore VFP state
	__JUMP(, lr);
	}
#endif


#ifdef __CPU_HAS_MMU
__NAKED__ TBool Arm::MmuActive()
	{
	asm("mrc p15, 0, r0, c1, c0, 0 ");
	asm("and r0, r0, #1 ");
	__JUMP(, lr);
	}

// Returns the content of Translation Table Base Register 0.
// To get the physical address of the level 1 table, on some platforms this must be orred with 0xffff8000 (to get rid of table walk cache attributes).
__NAKED__ TUint32 Arm::MmuTTBR0()
	{
	asm("mrc p15, 0, r0, c2, c0, 0 ");
	__JUMP(, lr);
	}
#endif


/** Get the current value of the system timestamp

@publishedPartner
@prototype
*/
EXPORT_C __NAKED__ TUint64 NKern::Timestamp()
	{
	asm("ldr r3, __TheScheduler ");
	asm("mrs r12, cpsr ");		// r12 = saved interrupt mask
	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TScheduler,i_LocalTimerAddr));	// r2 points to local timer
	__ASM_CLI();				// disable all interrupts
	GET_RWNO_TID(,r3);			// r3 -> TSubScheduler
	asm("ldr r1, [r2, #%a0]" : : "i" _FOFF(ArmLocalTimer,iTimerCount));		// r1 = current timer counter
	asm("ldr r0, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_LastTimerSet));	// r0 = last value written to timer counter
	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_TimerMultI));	// r2 = scaling factor
	asm("sub r0, r0, r1 ");		// elapsed timer ticks since last timestamp sync
	asm("umull r1, r2, r0, r2 ");	// r2:r1 = elapsed ticks * scaling factor
	asm("ldr r0, [r3, #%a0]!" : : "i" _FOFF(TSubScheduler,iLastTimestamp64));	// r0 = last timestamp sync point, low word
	asm("ldr r3, [r3, #4] ");	// r3 = last timestamp sync point, high word
	asm("adds r1, r1, #0x00800000 ");	// add 2^23 (rounding)
	asm("adcs r2, r2, #0 ");
	asm("mov r1, r1, lsr #24 ");		// divide by 2^24
	asm("orr r1, r1, r2, lsl #8 ");		// r1 = elapsed time since last timestamp sync
	asm("msr cpsr, r12 ");		// restore interrupts
	asm("adds r0, r0, r1 ");	// r1:r0 = last timestamp sync point + elapsed time since last timestamp sync
	asm("adcs r1, r3, #0 ");
	__JUMP(,lr);

	asm("__TheScheduler: ");
	asm(".word %a0" : : "i" ((TInt)&TheScheduler));
	}
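/* Illustrative model (not from the original source) of the computation done by
 * NKern::Timestamp() above.  i_LastTimerSet, i_TimerMultI and iLastTimestamp64
 * are the per-CPU fields read by the assembler; i_TimerMultI converts local
 * timer ticks to timestamp units as a fixed-point factor with 24 fractional
 * bits, and the +2^23 term rounds to nearest before the shift:
 *
 *	TUint32 elapsedTicks = i_LastTimerSet - iTimerCount;
 *	TUint64 scaled = (TUint64)elapsedTicks * (TUint64)i_TimerMultI;	// UMULL
 *	TUint64 elapsed = (scaled + 0x00800000u) >> 24;					// rounded divide by 2^24
 *	return iLastTimestamp64 + elapsed;
 */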

extern "C" __NAKED__ TLinAddr get_sp_svc()
	{
	asm("mrs r1, cpsr ");
	__ASM_CLI_MODE(MODE_SVC);
	asm("mov r0, sp ");
	asm("msr cpsr, r1 ");
	__JUMP(, lr);
	}

extern "C" __NAKED__ TLinAddr get_lr_svc()
	{
	asm("mrs r1, cpsr ");
	__ASM_CLI_MODE(MODE_SVC);
	asm("mov r0, lr ");
	asm("msr cpsr, r1 ");
	__JUMP(, lr);
	}


/** Get the return address from an ISR

Call only from an ISR

@internalTechnology
*/
EXPORT_C __NAKED__ TLinAddr Arm::IrqReturnAddress()
	{
	asm("mrs r1, cpsr ");
	__ASM_CLI();
	asm("and r0, r1, #0x1f ");
	asm("cmp r0, #0x11 ");		// mode_fiq ?
	asm("beq 1f ");
	__ASM_CLI_MODE(MODE_SVC);
	asm("ldr r0, [sp, #%a0]" : : "i" _FOFF(SThreadExcStack,iR15));
	asm("msr cpsr, r1 ");
	__JUMP(, lr);

	asm("1: ");
	GET_RWNO_TID(,r3);
	asm("ldr r2, [r3, #%a0]" : : "i" _FOFF(TSubScheduler,i_FiqStackTop));	// if so, r2->top of FIQ stack
	asm("ldr r0, [r2, #-4] ");	// get return address
	asm("msr cpsr, r1 ");
	__JUMP(, lr);
	}

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
#define	__ASM_CALL(func)	\
	asm("str lr, [sp, #-4]! ");	\
	asm("bl " CSM_CFUNC(func));	\
	asm("ldr lr, [sp], #4 ");

#define	SPIN_LOCK_ENTRY_CHECK()			__ASM_CALL(spin_lock_entry_check)
#define	SPIN_LOCK_MARK_ACQ()			__ASM_CALL(spin_lock_mark_acq)
#define	SPIN_UNLOCK_ENTRY_CHECK()		__ASM_CALL(spin_unlock_entry_check)

#define	RWSPIN_RLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_rlock_entry_check)
#define	RWSPIN_RLOCK_MARK_ACQ()			__ASM_CALL(rwspin_rlock_mark_acq)
#define	RWSPIN_RUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_runlock_entry_check)

#define	RWSPIN_WLOCK_ENTRY_CHECK()		__ASM_CALL(rwspin_wlock_entry_check)
#define	RWSPIN_WLOCK_MARK_ACQ()			__ASM_CALL(rwspin_wlock_mark_acq)
#define	RWSPIN_WUNLOCK_ENTRY_CHECK()	__ASM_CALL(rwspin_wunlock_entry_check)

#else
#define	SPIN_LOCK_ENTRY_CHECK()
#define	SPIN_LOCK_MARK_ACQ()
#define	SPIN_UNLOCK_ENTRY_CHECK()

#define	RWSPIN_RLOCK_ENTRY_CHECK()
#define	RWSPIN_RLOCK_MARK_ACQ()
#define	RWSPIN_RUNLOCK_ENTRY_CHECK()

#define	RWSPIN_WLOCK_ENTRY_CHECK()
#define	RWSPIN_WLOCK_MARK_ACQ()
#define	RWSPIN_WUNLOCK_ENTRY_CHECK()

#endif


/******************************************************************************
 * Spin locks
 *
 * [this+0]		out count (byte)
 * [this+1]		in count (byte)
 * [this+6]		order (byte)
 * [this+7]		holding CPU (byte)
 ******************************************************************************/

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void spin_lock_entry_check()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq slec_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldrh r2, [r0, #6] ");		/* R2[8:15]=holding CPU, R2[0:7]=order */
	asm("tst r2, #0xE0 ");
	asm("bne slec_preemption ");	/* This lock requires preemption to be disabled */

	/* Check interrupts disabled; if interrupts/preemption are not disabled
	   there is a risk of a same-core deadlock occurring, hence this check and
	   run-time assert to ensure the code stays safe */
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* Check interrupts masked */
	asm("beq slec_1 ");				/* Yes - OK */
	__ASM_CRASH();					/* No - die */

	asm("slec_preemption: ");
	asm("and r3, r2, #0xFF ");
	asm("cmp r3, #0xFF ");			/* check for EOrderNone */
	asm("beq slec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("and r3, r12, #0x1F ");
	asm("cmp r3, #0x13 ");			/* Make sure we're in mode_svc */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("bne slec_preemption_die ");	/* If not, die */
	asm("cmp r3, #0 ");
	asm("bne slec_1 ");				/* Preemption disabled - OK */
	asm("slec_preemption_die: ");
	__ASM_CRASH();					/* Preemption enabled - die */

	asm("slec_1: ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp r3, r2, lsr #8 ");		/* Test if held by current CPU */
	asm("bne slec_2 ");				/* Not already held by this CPU - OK */
	__ASM_CRASH();					/* Already held by this CPU - die */

	asm("slec_2: ");
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("ldr r1, [r1, #4] ");			/* r3=low word of iSpinLockOrderCheck, r1=high word */
	asm("cmp r3, #0 ");
	asm("addeq r2, r2, #0x20000000 ");	/* if low word zero, add 32 to LS1 index ... */
	asm("moveq r3, r1 ");				/* ... and r3=high word ... */
	asm("subs r1, r3, #1 ");			/* R1 = R3 with all bits up to and including LS1 flipped */
	asm("beq slec_ok ");				/* If all bits zero, no locks held so OK */
	asm("eor r3, r3, r1 ");				/* Clear all bits above LS1 */
	CLZ(1,3);							/* R1 = 31 - bit number of LS1 */
	asm("rsb r1, r1, #31 ");			/* R1 = bit number of LS1 */
	asm("add r1, r1, r2, lsr #24 ");	/* add 32 if we were looking at high word */
	asm("mov r2, r2, lsl #24 ");		/* this lock's order value into R2 high byte */
	asm("cmp r1, r2, asr #24 ");		/* compare current lowest order lock to sign-extended order value */
	asm("bgt slec_ok ");				/* if this lock's order < current lowest, OK */
	__ASM_CRASH();						/* otherwise die */

	asm("slec_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}
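/* Illustrative model (not from the original source) of the order check performed
 * by spin_lock_entry_check() above.  iSpinLockOrderCheck is treated as a 64-bit
 * bitmask with one bit per lock order currently held by this CPU; a new lock may
 * only be acquired if its order is strictly lower than the lowest order already
 * held (EOrderNone, 0xFF, is exempt), which keeps nested acquisition deadlock-free:
 *
 *	TBool OrderCheckOk(TUint64 aHeldMask, TInt aNewOrder)
 *		{
 *		if (aHeldMask == 0)
 *			return ETrue;							// no spin locks currently held
 *		TInt lowestHeld = LowestSetBit(aHeldMask);	// hypothetical helper; the assembler uses EOR/CLZ
 *		return aNewOrder < lowestHeld;
 *		}
 */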

extern "C" __NAKED__ void spin_lock_mark_acq()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq slma_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("ldrb r2, [r0, #6] ");		/* R2 = lock order value */
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("strb r3, [r0, #7] ");		/* set byte 7 to holding CPU number */
	asm("cmp r2, #0x40 ");
	asm("bhs slma_ok ");			/* if EOrderNone, done */
	asm("cmp r2, #0x20 ");
	asm("addhs r1, r1, #4 ");
	asm("and r2, r2, #0x1f ");
	asm("mov r3, #1 ");
	asm("mov r3, r3, lsl r2 ");		/* r3 = bit to set */
	asm("ldr r2, [r1] ");
	asm("orr r2, r2, r3 ");
	asm("str r2, [r1] ");			/* set bit in iSpinLockOrderCheck corresponding to lock order */

	asm("slma_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}

extern "C" __NAKED__ void spin_unlock_entry_check()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq suec_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldrh r2, [r0, #6] ");		/* R2[8:15]=holding CPU, R2[0:7]=order */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("eor r2, r2, r3, lsl #8 ");	/* R2[8:15]=holding CPU^current CPU, R2[0:7]=order */
	asm("tst r2, #0xE0 ");
	asm("bne suec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* Check interrupts masked */
	asm("beq suec_1 ");				/* Yes - OK */
	__ASM_CRASH();					/* No - die */

	asm("suec_preemption: ");
	asm("and r3, r2, #0xFF ");
	asm("cmp r3, #0xFF ");			/* check for EOrderNone */
	asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("beq suec_1 ");				/* EOrderNone - don't check interrupts or preemption */
	asm("cmp r3, #0 ");
	asm("bne suec_1 ");				/* Preemption disabled - OK */
	__ASM_CRASH();					/* Preemption enabled - die */

	asm("suec_1: ");
	asm("tst r2, #0xFF00 ");		/* Check if holding CPU ^ current CPU number == 0 */
	asm("beq suec_2 ");				/* Held by this CPU - OK */
	__ASM_CRASH();					/* Not held by this CPU - die */

	asm("suec_2: ");
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("mov r3, #0xFF ");
	asm("strb r3, [r0, #7] ");		/* reset holding CPU */
	asm("cmp r2, #0x40 ");
	asm("bhs suec_ok ");			/* if EOrderNone, done */
	asm("cmp r2, #0x20 ");
	asm("addhs r1, r1, #4 ");
	asm("and r2, r2, #0x1F ");
	asm("mov r3, #1 ");
	asm("mov r3, r3, lsl r2 ");		/* r3 = bit to clear */
	asm("ldr r2, [r1] ");
	asm("tst r2, r3 ");				/* test bit originally set */
	asm("bic r2, r2, r3 ");
	asm("str r2, [r1] ");			/* clear bit in iSpinLockOrderCheck corresponding to lock order */
	asm("bne suec_ok ");			/* if originally set, OK */
	__ASM_CRASH();					/* if not, die - something must have got corrupted */

	asm("suec_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}
#endif


/******************************************************************************
 * Plain old spin lock
 *
 * Fundamental algorithm:
 *	lock()		{ old_in = in++; while(out!=old_in) __chill(); }
 *	unlock()	{ ++out; }
 *
 * [this+0]		out count (byte)
 * [this+1]		in count (byte)
 *
 ******************************************************************************/
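/* Illustrative C model (not from the original source) of the ticket lock
 * implemented below, using the byte layout above (out in the low byte, in in
 * the high byte).  Lock*() takes a ticket by atomically incrementing 'in'
 * (LDREXH/STREXH) and spins (WFE) until 'out' catches up; Unlock*() publishes
 * the next ticket and issues DSB+SEV.  The Flash* variants use the same two
 * counters to decide whether anyone else is queued; the FlashIrq* ones also
 * release the lock when the GIC reports a real pending interrupt
 * (iHighestPending other than the "nothing pending" value, 1023 in this code):
 *
 *	struct SLock { volatile TUint8 iOut; volatile TUint8 iIn; };
 *
 *	void Lock(SLock& a)
 *		{
 *		TUint8 myTicket = AtomicPostIncrement(a.iIn);	// hypothetical helper (LDREXH/STREXH loop)
 *		while (a.iOut != myTicket)
 *			__chill();									// WFE in the assembler
 *		}
 *
 *	void Unlock(SLock& a)
 *		{ ++a.iOut; }									// followed by DSB and SEV
 *
 *	TBool SomeoneElseWaiting(const SLock& a)
 *		{ return TUint8(a.iOut - a.iIn) != 0xFFu; }		// out - in == -1 only while we are the sole holder
 */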
__NAKED__ EXPORT_C void TSpinLock::LockIrq()
	{
	__ASM_CLI();							/* Disable interrupts */
	SPIN_LOCK_ENTRY_CHECK()
	asm("1: ");
	LDREXH(1,0);
	asm("mov r2, r1, lsr #8 ");				/* R2 = original in count */
	asm("add r1, r1, #0x100 ");
	STREXH(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("and r1, r1, #0xFF ");				/* R1 = out count */
	asm("3: ");
	asm("cmp r2, r1 ");						/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	SPIN_LOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldrb r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockIrq()
	{
	SPIN_UNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r1);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #0] ");
	asm("add r2, r2, #1 ");
	asm("strb r2, [r0, #0] ");				/* ++out */
	__DATA_SYNC_BARRIER__(r1);				/* Ensure write to out completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__ASM_STI();							/* Enable interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrq()
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldrh r1, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("sub r1, r1, r1, lsr #8 ");			/* r1 low byte = (out - in) mod 256 */
	asm("and r1, r1, #0xFF ");
	asm("cmp r1, #0xFF ");					/* if out - in = -1, no-one else waiting */
	asm("addeq r3, r3, #1 ");
	asm("cmpeq r3, #1024 ");				/* if no-one waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if someone else waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN9TSpinLock9UnlockIrqEv);
	asm("bl " CSM_ZN9TSpinLock7LockIrqEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


__NAKED__ EXPORT_C void TSpinLock::LockOnly()
	{
	SPIN_LOCK_ENTRY_CHECK()
	asm("1: ");
	LDREXH(1,0);
	asm("mov r2, r1, lsr #8 ");				/* R2 = original in count */
	asm("add r1, r1, #0x100 ");
	STREXH(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("and r1, r1, #0xFF ");				/* R1 = out count */
	asm("3: ");
	asm("cmp r2, r1 ");						/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	SPIN_LOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldrb r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockOnly()
	{
	SPIN_UNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r1);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #0] ");
	asm("add r2, r2, #1 ");
	asm("strb r2, [r0, #0] ");				/* ++out */
	__DATA_SYNC_BARRIER__(r1);				/* Ensure write to out completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashOnly()
	{
	asm("ldrh r1, [r0, #0] ");
	asm("sub r1, r1, r1, lsr #8 ");			/* r1 low byte = (out - in) mod 256 */
	asm("and r1, r1, #0xFF ");
	asm("cmp r1, #0xFF ");					/* if out - in = -1, no-one else waiting */
	asm("bne 1f ");							/* branch if someone else waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN9TSpinLock10UnlockOnlyEv);
	asm("bl " CSM_ZN9TSpinLock8LockOnlyEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


__NAKED__ EXPORT_C TInt TSpinLock::LockIrqSave()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();							/* Disable interrupts */
	SPIN_LOCK_ENTRY_CHECK()
	asm("1: ");
	LDREXH(1,0);
	asm("mov r2, r1, lsr #8 ");				/* R2 = original in count */
	asm("add r1, r1, #0x100 ");
	STREXH(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("and r1, r1, #0xFF ");				/* R1 = out count */
	asm("3: ");
	asm("cmp r2, r1 ");						/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	SPIN_LOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldrb r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TSpinLock::UnlockIrqRestore(TInt)
	{
	SPIN_UNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #0] ");
	asm("mrs r12, cpsr ");
	asm("add r2, r2, #1 ");
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("strb r2, [r0, #0] ");				/* ++out */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");					/* restore interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TSpinLock::FlashIrqRestore(TInt)
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldrh r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("sub r2, r2, r2, lsr #8 ");			/* r2 low byte = (out - in) mod 256 */
	asm("and r2, r2, #0xFF ");
	asm("cmp r2, #0xFF ");					/* if out - in = -1, no-one else waiting */
	asm("addeq r3, r3, #1 ");
	asm("cmpeq r3, #1024 ");				/* if no-one waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if someone else waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN9TSpinLock16UnlockIrqRestoreEi);
	asm("bl " CSM_ZN9TSpinLock7LockIrqEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


__NAKED__ EXPORT_C TBool TSpinLock::FlashPreempt()
	{
	asm("ldrh r2, [r0, #0] ");
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("sub r2, r2, r2, lsr #8 ");			/* r2 low byte = (out - in) mod 256 */
	asm("and r2, r2, #0xFF ");
	asm("cmp r2, #0xFF ");					/* if out - in = -1, no-one else waiting */
	asm("cmpeq r3, #0 ");					/* if no-one else waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");							/* if so or someone else waiting, branch to release lock */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN9TSpinLock10UnlockOnlyEv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN9TSpinLock8LockOnlyEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/******************************************************************************
 * Read/Write Spin lock
 *
 * Structure ( (in.r,in.w) , (out.r,out.w) )
 * Fundamental algorithm:
 *	lockr()		{ old_in = (in.r++,in.w); while(out.w!=old_in.w) __chill(); }
 *	unlockr()	{ ++out.r; }
 *	lockw()		{ old_in = (in.r,in.w++); while(out!=old_in) __chill(); }
 *	unlockw()	{ ++out.w; }
 *
 * [this+0]		in.w
 * [this+1]		in.r
 * [this+2]		out.w
 * [this+3]		out.r
 * [this+4]		Bit mask of CPUs which hold read locks
 * [this+6]		order value
 * [this+7]		CPU number which holds write lock, 0xFF if none
 *
 ******************************************************************************/
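/* Illustrative C model (not from the original source) of the read/write ticket
 * lock implemented below, using the field layout above (one byte each for in.w,
 * in.r, out.w, out.r).  A reader only waits for writers that took tickets before
 * it; a writer waits for all earlier readers and writers:
 *
 *	struct SRWLock { volatile TUint8 iInW, iInR, iOutW, iOutR; };
 *
 *	void LockR(SRWLock& a)
 *		{
 *		TUint8 w = a.iInW; ++a.iInR;		// done atomically via LDREX/STREX on the whole word
 *		while (a.iOutW != w)
 *			__chill();
 *		}
 *	void UnlockR(SRWLock& a)	{ ++a.iOutR; }
 *
 *	void LockW(SRWLock& a)
 *		{
 *		TUint8 w = a.iInW++, r = a.iInR;	// done atomically, as above
 *		while (a.iOutW != w || a.iOutR != r)
 *			__chill();
 *		}
 *	void UnlockW(SRWLock& a)	{ ++a.iOutW; }
 */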

#if defined(__INCLUDE_SPIN_LOCK_CHECKS__)
extern "C" __NAKED__ void rwspin_rlock_entry_check()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();					/* Disable interrupts */
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq rwrlec_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldr r2, [r0, #4] ");		/* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */
	asm("tst r2, #0x00E00000 ");
	asm("bne rwrlec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* Check interrupts masked */
	asm("beq rwrlec_1 ");			/* Yes - OK */
	__ASM_CRASH();					/* No - die */

	asm("rwrlec_preemption: ");
	asm("and r3, r2, #0x00FF0000 ");
	asm("cmp r3, #0x00FF0000 ");	/* check for EOrderNone */
	asm("beq rwrlec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("and r3, r12, #0x1F ");
	asm("cmp r3, #0x13 ");			/* Make sure we're in mode_svc */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("bne rwrlec_preemption_die ");	/* If not, die */
	asm("cmp r3, #0 ");
	asm("bne rwrlec_1 ");			/* Preemption disabled - OK */
	asm("rwrlec_preemption_die: ");
	__ASM_CRASH();					/* Preemption enabled - die */

	asm("rwrlec_1: ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("eor r3, r2, r3, lsl #24 ");
	asm("cmp r3, #0x01000000 ");	/* Held by current CPU for write ? */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("bhs rwrlec_2 ");			/* No - OK */
	__ASM_CRASH();					/* Already held by this CPU for write - die */

	asm("rwrlec_2: ");
	asm("tst r2, r3 ");				/* Held by current CPU for read ? */
	asm("beq rwrlec_3 ");			/* No - OK */
	__ASM_CRASH();					/* Already held by this CPU for read - die */

	asm("rwrlec_3: ");
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("mov r2, r2, lsr #16 ");
	asm("and r2, r2, #0xFF ");			/* r2 = lock order */
	asm("ldr r1, [r1, #4] ");			/* r3=low word of iSpinLockOrderCheck, r1=high word */
	asm("cmp r3, #0 ");
	asm("addeq r2, r2, #0x20000000 ");	/* if low word zero, add 32 to LS1 index ... */
	asm("moveq r3, r1 ");				/* ... and r3=high word ... */
	asm("subs r1, r3, #1 ");			/* R1 = R3 with all bits up to and including LS1 flipped */
	asm("beq rwrlec_ok ");				/* If all bits zero, no locks held so OK */
	asm("eor r3, r3, r1 ");				/* Clear all bits above LS1 */
	CLZ(1,3);							/* R1 = 31 - bit number of LS1 */
	asm("rsb r1, r1, #31 ");			/* R1 = bit number of LS1 */
	asm("add r1, r1, r2, lsr #24 ");	/* add 32 if we were looking at high word */
	asm("mov r2, r2, lsl #24 ");		/* this lock's order value into R2 high byte */
	asm("cmp r1, r2, asr #24 ");		/* compare current lowest order lock to sign-extended order value */
	asm("bgt rwrlec_ok ");				/* if this lock's order < current lowest, OK */
	__ASM_CRASH();						/* otherwise die */

	asm("rwrlec_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}

extern "C" __NAKED__ void rwspin_rlock_mark_acq()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1-r4,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();					/* Disable interrupts */
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq rwrlma_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("add r0, r0, #4 ");
	asm("1: ");
	LDREXB(2,0);					/* rcpu mask */
	asm("orr r2, r2, r3 ");			/* set bit corresponding to current CPU */
	STREXB(4,2,0);
	asm("cmp r4, #0 ");
	asm("bne 1b ");
	asm("ldrb r2, [r0, #2] ");		/* R2 = lock order value */
	asm("sub r0, r0, #4 ");
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("cmp r2, #0x40 ");
	asm("bhs rwrlma_ok ");			/* if EOrderNone, done */
	asm("cmp r2, #0x20 ");
	asm("addhs r1, r1, #4 ");
	asm("and r2, r2, #0x1f ");
	asm("mov r3, #1 ");
	asm("mov r3, r3, lsl r2 ");		/* r3 = bit to set */
	asm("ldr r2, [r1] ");
	asm("orr r2, r2, r3 ");
	asm("str r2, [r1] ");			/* set bit in iSpinLockOrderCheck corresponding to lock order */

	asm("rwrlma_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1-r4,r12} ");
	__JUMP(,lr);
	}

extern "C" __NAKED__ void rwspin_runlock_entry_check()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1-r4,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();					/* Disable interrupts */
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq rwruec_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldr r2, [r0, #4] ");		/* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */
	asm("tst r2, #0x00E00000 ");
	asm("bne rwruec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* Check interrupts masked */
	asm("beq rwruec_1 ");			/* Yes - OK */
	__ASM_CRASH();					/* No - die */

	asm("rwruec_preemption: ");
	asm("and r3, r2, #0x00FF0000 ");
	asm("cmp r3, #0x00FF0000 ");	/* check for EOrderNone */
	asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("beq rwruec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp r3, #0 ");
	asm("bne rwruec_1 ");			/* Preemption disabled - OK */
	__ASM_CRASH();					/* Preemption enabled - die */

	asm("rwruec_1: ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("tst r2, r3 ");				/* Check if current CPU holds read lock */
	asm("bne rwruec_2 ");			/* Read lock held by this CPU - OK */
	__ASM_CRASH();					/* Not held by this CPU - die */

	asm("rwruec_2: ");
	asm("add r0, r0, #4 ");
	asm("1: ");
	LDREX(2,0);						/* rcpu mask */
	asm("bic r2, r2, r3 ");			/* clear bit corresponding to current CPU */
	STREX(4,2,0);
	asm("cmp r4, #0 ");
	asm("bne 1b ");
	asm("sub r0, r0, #4 ");
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("tst r2, #0x00C00000 ");
	asm("bne rwruec_ok ");			/* if EOrderNone, done */
	asm("tst r2, #0x00200000 ");
	asm("addne r1, r1, #4 ");
	asm("mov r2, r2, lsr #16 ");
	asm("and r2, r2, #0x1F ");
	asm("mov r3, #1 ");
	asm("mov r3, r3, lsl r2 ");		/* r3 = bit to clear */
	asm("ldr r2, [r1] ");
	asm("tst r2, r3 ");				/* test bit originally set */
	asm("bic r2, r2, r3 ");
	asm("str r2, [r1] ");			/* clear bit in iSpinLockOrderCheck corresponding to lock order */
	asm("bne rwruec_ok ");			/* if originally set, OK */
	__ASM_CRASH();					/* if not, die - something must have got corrupted */

	asm("rwruec_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1-r4,r12} ");
	__JUMP(,lr);
	}


extern "C" __NAKED__ void rwspin_wlock_entry_check()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();					/* Disable interrupts */
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq rwwlec_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldr r2, [r0, #4] ");		/* R2[24:31]=wcpu, R2[16:23]=order, R2[0:7]=rcpu mask */
	asm("tst r2, #0x00E00000 ");
	asm("bne rwwlec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* Check interrupts masked */
	asm("beq rwwlec_1 ");			/* Yes - OK */
	__ASM_CRASH();					/* No - die */

	asm("rwwlec_preemption: ");
	asm("and r3, r2, #0x00FF0000 ");
	asm("cmp r3, #0x00FF0000 ");	/* check for EOrderNone */
	asm("beq rwwlec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("and r3, r12, #0x1F ");
	asm("cmp r3, #0x13 ");			/* Make sure we're in mode_svc */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("bne rwwlec_preemption_die ");	/* If not, die */
	asm("cmp r3, #0 ");
	asm("bne rwwlec_1 ");			/* Preemption disabled - OK */
	asm("rwwlec_preemption_die: ");
	__ASM_CRASH();					/* Preemption enabled - die */

	asm("rwwlec_1: ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuMask));
	asm("tst r2, r3 ");				/* Test if held by current CPU for read */
	asm("beq rwwlec_2 ");			/* No - OK */
	__ASM_CRASH();					/* Yes - die */

	asm("rwwlec_2: ");
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("cmp r3, r2, lsr #24 ");	/* Test if held by current CPU for write */
	asm("bne rwwlec_3 ");			/* No - OK */
	__ASM_CRASH();					/* Yes - die */

	asm("rwwlec_3: ");
	asm("ldr r3, [r1, #%a0]!" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("mov r2, r2, lsr #16 ");
	asm("and r2, r2, #0xFF ");			/* r2 = lock order */
	asm("ldr r1, [r1, #4] ");			/* r3=low word of iSpinLockOrderCheck, r1=high word */
	asm("cmp r3, #0 ");
	asm("addeq r2, r2, #0x20000000 ");	/* if low word zero, add 32 to LS1 index ... */
	asm("moveq r3, r1 ");				/* ... and r3=high word ... */
	asm("subs r1, r3, #1 ");			/* R1 = R3 with all bits up to and including LS1 flipped */
	asm("beq rwwlec_ok ");				/* If all bits zero, no locks held so OK */
	asm("eor r3, r3, r1 ");				/* Clear all bits above LS1 */
	CLZ(1,3);							/* R1 = 31 - bit number of LS1 */
	asm("rsb r1, r1, #31 ");			/* R1 = bit number of LS1 */
	asm("add r1, r1, r2, lsr #24 ");	/* add 32 if we were looking at high word */
	asm("mov r2, r2, lsl #24 ");		/* this lock's order value into R2 high byte */
	asm("cmp r1, r2, asr #24 ");		/* compare current lowest order lock to sign-extended order value */
	asm("bgt rwwlec_ok ");				/* if this lock's order < current lowest, OK */
	__ASM_CRASH();						/* otherwise die */

	asm("rwwlec_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}

extern "C" __NAKED__ void rwspin_wlock_mark_acq()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();					/* Disable interrupts */
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq rwwlma_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("ldrb r2, [r0, #6] ");		/* R2 = lock order value */
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("strb r3, [r0, #7] ");		/* set byte 7 to holding CPU number */
	asm("cmp r2, #0x40 ");
	asm("bhs rwwlma_ok ");			/* if EOrderNone, done */
	asm("cmp r2, #0x20 ");
	asm("addhs r1, r1, #4 ");
	asm("and r2, r2, #0x1f ");
	asm("mov r3, #1 ");
	asm("mov r3, r3, lsl r2 ");		/* r3 = bit to set */
	asm("ldr r2, [r1] ");
	asm("orr r2, r2, r3 ");
	asm("str r2, [r1] ");			/* set bit in iSpinLockOrderCheck corresponding to lock order */

	asm("rwwlma_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}

extern "C" __NAKED__ void rwspin_wunlock_entry_check()
	{
	/* R0 points to lock */
	asm("stmfd sp!, {r1,r2,r3,r12} ");
	asm("mrs r12, cpsr ");
	__ASM_CLI();					/* Disable interrupts */
	GET_RWNO_TID(, r1);				/* R1->SubScheduler */
	asm("cmp r1, #0 ");
	asm("beq rwwuec_ok ");			/* Skip checks if subscheduler not yet initialised */
	asm("ldrh r2, [r0, #6] ");		/* R2[8:15]=holding CPU, R2[0:7]=order */
	asm("ldr r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iCpuNum));
	asm("eor r2, r2, r3, lsl #8 ");	/* R2[8:15]=holding CPU^current CPU, R2[0:7]=order */
	asm("tst r2, #0xE0 ");
	asm("bne rwwuec_preemption ");	/* This lock requires preemption to be disabled */

	/* check interrupts disabled */
	asm("and r3, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("cmp r3, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* Check interrupts masked */
	asm("beq rwwuec_1 ");			/* Yes - OK */
	__ASM_CRASH();					/* No - die */

	asm("rwwuec_preemption: ");
	asm("and r3, r2, #0xFF ");
	asm("cmp r3, #0xFF ");			/* check for EOrderNone */
	asm("ldrne r3, [r1, #%a0]" : : "i" _FOFF(TSubScheduler, iKernLockCount));
	asm("beq rwwuec_1 ");			/* EOrderNone - don't check interrupts or preemption */
	asm("cmp r3, #0 ");
	asm("bne rwwuec_1 ");			/* Preemption disabled - OK */
	__ASM_CRASH();					/* Preemption enabled - die */

	asm("rwwuec_1: ");
	asm("tst r2, #0xFF00 ");		/* Check if holding CPU ^ current CPU number == 0 */
	asm("beq rwwuec_2 ");			/* Held by this CPU - OK */
	__ASM_CRASH();					/* Not held by this CPU - die */

	asm("rwwuec_2: ");
	asm("add r1, r1, #%a0" : : "i" _FOFF(TSubScheduler, iSpinLockOrderCheck));
	asm("mov r3, #0xFF ");
	asm("strb r3, [r0, #7] ");		/* reset holding CPU */
	asm("cmp r2, #0x40 ");
	asm("bhs rwwuec_ok ");			/* if EOrderNone, done */
	asm("cmp r2, #0x20 ");
	asm("addhs r1, r1, #4 ");
	asm("and r2, r2, #0x1F ");
	asm("mov r3, #1 ");
	asm("mov r3, r3, lsl r2 ");		/* r3 = bit to clear */
	asm("ldr r2, [r1] ");
	asm("tst r2, r3 ");				/* test bit originally set */
	asm("bic r2, r2, r3 ");
	asm("str r2, [r1] ");			/* clear bit in iSpinLockOrderCheck corresponding to lock order */
	asm("bne rwwuec_ok ");			/* if originally set, OK */
	__ASM_CRASH();					/* if not, die - something must have got corrupted */

	asm("rwwuec_ok: ");
	asm("msr cpsr, r12 ");			/* restore interrupts */
	asm("ldmfd sp!, {r1,r2,r3,r12} ");
	__JUMP(,lr);
	}
#endif


/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqR()
	{
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("and r2, r1, #0xFF ");				/* R2 = original in.w */
	asm("add r1, r1, #0x100 ");				/* increment in.r */
	asm("tst r1, #0xFF00 ");				/* if wraparound ... */
	asm("subeq r1, r1, #0x10000 ");			/* ... revert carry into out.w */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("and r1, r1, #0xFF0000 ");			/* R1 = out.w << 16 */
	asm("cmp r1, r2, lsl #16 ");			/* out.w = original in.w ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_RLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out.w count again */
	asm("b 3b ");
	}
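/* Illustrative model (not from the original source) of the wraparound handling
 * in LockIrqR() above.  All four counters share one 32-bit word so that
 * LDREX/STREX can update them atomically; adding 0x100 to increment in.r
 * (bits 8..15) can carry into out.w (bits 16..23), so the carry is reverted
 * whenever in.r wraps to zero:
 *
 *	TUint32 IncInR(TUint32 aWord)
 *		{
 *		aWord += 0x100;				// ++in.r
 *		if ((aWord & 0xFF00) == 0)	// in.r wrapped from 0xFF to 0x00 ...
 *			aWord -= 0x10000;		// ... so undo the carry that leaked into out.w
 *		return aWord;
 *		}
 *
 * LockIrqW(), LockOnlyW() and LockIrqSaveW() below do the same for in.w
 * (add 1, then revert any carry into in.r).
 */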

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqR()
	{
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("1: ");
	LDREX(2,0);
	asm("add r2, r2, #0x01000000 ");		/* increment out.r */
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.r completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__ASM_STI();							/* Enable interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqR()
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("addeq r3, r3, #1 ");
	asm("cmpeq r3, #1024 ");				/* if no writers waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if writers waiting or pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock10UnlockIrqREv);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockIrqW()
	{
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");			/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");					/* increment in.w */
	asm("tst r1, #0xFF ");					/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");			/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");			/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");			/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqW()
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("add r2, r2, #1 ");
	asm("strb r2, [r0, #2] ");				/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.w completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__ASM_STI();							/* Enable interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqW()
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("add r3, r3, #1 ");
	asm("cmp r3, #1024 ");					/* if no-one waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock10UnlockIrqWEv);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}
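/* Illustrative model (not from the original source) of the
 * "(out.w+1,out.r) == (in.w,in.r)" test used by FlashIrqW() above and by
 * FlashOnlyW(), FlashIrqRestoreW() and FlashPreemptW() below.  While this CPU
 * holds the write lock, in.w is exactly one ahead of out.w; any other pending
 * reader or writer shows up as a further difference in the counters:
 *
 *	TBool OthersWaitingW(TUint32 aWord)
 *		{
 *		TUint8 inW  = TUint8(aWord),       inR  = TUint8(aWord >> 8);
 *		TUint8 outW = TUint8(aWord >> 16), outR = TUint8(aWord >> 24);
 *		return TUint8(outW + 1) != inW || outR != inR;
 *		}
 *
 * Typical use of the Flash* family (a sketch, not taken from this file): code
 * that holds a lock across a long loop calls e.g. FlashPreemptW() once per
 * iteration; the lock is briefly released only when someone else is waiting or
 * a reschedule/IDFC is pending, and the return value reports whether that happened.
 */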

/*-----------------------------------------------------------------------------
 - Read locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyR()
	{
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("and r2, r1, #0xFF ");				/* R2 = original in.w */
	asm("add r1, r1, #0x100 ");				/* increment in.r */
	asm("tst r1, #0xFF00 ");				/* if wraparound ... */
	asm("subeq r1, r1, #0x10000 ");			/* ... revert carry into out.w */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("and r1, r1, #0xFF0000 ");			/* R1 = out.w << 16 */
	asm("cmp r1, r2, lsl #16 ");			/* out.w = original in.w ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_RLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out.w count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyR()
	{
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("1: ");
	LDREX(2,0);
	asm("add r2, r2, #0x01000000 ");		/* increment out.r */
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.r completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyR()
	{
	asm("ldr r2, [r0, #0] ");
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("bne 1f ");							/* branch if writers waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyREv);
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Write locks leaving IRQ alone
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C void TRWSpinLock::LockOnlyW()
	{
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");			/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");					/* increment in.w */
	asm("tst r1, #0xFF ");					/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");			/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");			/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");			/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockOnlyW()
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("add r2, r2, #1 ");
	asm("strb r2, [r0, #2] ");				/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.w completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashOnlyW()
	{
	asm("ldr r2, [r0, #0] ");
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyWEv);
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Read locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveR()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_RLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("and r2, r1, #0xFF ");				/* R2 = original in.w */
	asm("add r1, r1, #0x100 ");				/* increment in.r */
	asm("tst r1, #0xFF00 ");				/* if wraparound ... */
	asm("subeq r1, r1, #0x10000 ");			/* ... revert carry into out.w */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("and r1, r1, #0xFF0000 ");			/* R1 = out.w << 16 */
	asm("cmp r1, r2, lsl #16 ");			/* out.w = original in.w ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_RLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out.w count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreR(TInt)
	{
	RWSPIN_RUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("1: ");
	LDREX(2,0);
	asm("add r2, r2, #0x01000000 ");		/* increment out.r */
	STREX(3,2,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("mrs r12, cpsr ");
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.r completes before SEV */
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	ARM_SEV;								/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");					/* restore interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreR(TInt)
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("addeq r3, r3, #1 ");
	asm("cmpeq r3, #1024 ");				/* if no writers waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if writers waiting or pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock17UnlockIrqRestoreREi);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Write locks disabling IRQ with save/restore IRQ state
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TInt TRWSpinLock::LockIrqSaveW()
	{
	asm("mrs r12, cpsr ");
	__ASM_CLI();							/* Disable interrupts */
	RWSPIN_WLOCK_ENTRY_CHECK()
	asm("1: ");
	LDREX(1,0);
	asm("mov r2, r1, lsl #16 ");			/* R2 = original in << 16 */
	asm("add r1, r1, #1 ");					/* increment in.w */
	asm("tst r1, #0xFF ");					/* if wraparound ... */
	asm("subeq r1, r1, #0x100 ");			/* ... revert carry into in.r */
	STREX(3,1,0);
	asm("cmp r3, #0 ");
	asm("bne 1b ");
	asm("3: ");
	asm("mov r1, r1, lsr #16 ");			/* r1 = out */
	asm("cmp r1, r2, lsr #16 ");			/* out = original in ? */
	asm("bne 2f ");							/* no - must wait */
	RWSPIN_WLOCK_MARK_ACQ()
	__DATA_MEMORY_BARRIER__(r3);			/* we have got the lock */
	asm("and r0, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));	/* return original CPSR I and F bits */
	__JUMP(,lr);

	asm("2: ");
	ARM_WFE;
	asm("ldr r1, [r0, #0] ");				/* read out count again */
	asm("b 3b ");
	}

__NAKED__ EXPORT_C void TRWSpinLock::UnlockIrqRestoreW(TInt)
	{
	RWSPIN_WUNLOCK_ENTRY_CHECK()
	__DATA_MEMORY_BARRIER_Z__(r3);			/* Ensure accesses don't move outside locked section */
	asm("ldrb r2, [r0, #2] ");
	asm("mrs r12, cpsr ");
	asm("add r2, r2, #1 ");
	asm("bic r12, r12, #%a0" : : "i" ((TInt)KAllInterruptsMask));
	asm("strb r2, [r0, #2] ");				/* increment out.w */
	__DATA_SYNC_BARRIER__(r3);				/* Ensure write to out.w completes before SEV */
	ARM_SEV;								/* Wake up any waiting processors */
	asm("orr r1, r1, r12 ");
	asm("msr cpsr, r1 ");					/* restore interrupts */
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBool TRWSpinLock::FlashIrqRestoreW(TInt)
	{
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r12, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,i_GicCpuIfcAddr));
	asm("ldr r2, [r0, #0] ");
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(GicCpuIfc,iHighestPending));
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("add r3, r3, #1 ");
	asm("cmp r3, #1024 ");					/* if no-one else waiting for lock, check for pending interrupt */
	asm("bne 1f ");							/* branch if pending interrupt */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("str lr, [sp, #-4]! ");
	asm("bl " CSM_ZN11TRWSpinLock17UnlockIrqRestoreWEi);
	asm("bl " CSM_ZN11TRWSpinLock8LockIrqWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Read lock flash allowing preemption
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptR()
	{
	asm("ldr r2, [r0, #0] ");
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("eor r2, r2, r2, lsr #16 ");		/* r2 low byte = out.w ^ in.w = 0 if no writers waiting */
	asm("tst r2, #0xFF ");
	asm("cmpeq r3, #0 ");					/* if no writers waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");							/* branch if so or if writers waiting */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyREv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyREv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}


/*-----------------------------------------------------------------------------
 - Write lock flash allowing preemption
 -----------------------------------------------------------------------------*/
__NAKED__ EXPORT_C TBool TRWSpinLock::FlashPreemptW()
	{
	asm("ldr r2, [r0, #0] ");
	GET_RWNO_TID(,r12);						/* r12 -> TSubScheduler */
	asm("ldr r3, [r12, #%a0]" : : "i" _FOFF(TSubScheduler,iRescheduleNeededFlag));
	asm("add r2, r2, #0x00010000 ");		/* increment out.w */
	asm("tst r2, #0x00FF0000 ");			/* if wraparound, revert carry */
	asm("subeq r2, r2, #0x01000000 ");
	asm("eor r2, r2, r2, lsl #16 ");		/* test if (out.w+1,out.r) == (in.w,in.r) */
	asm("cmp r2, #0x00010000 ");
	asm("bhs 1f ");							/* if not, someone else is waiting */
	asm("cmp r3, #0 ");						/* no-one else waiting, check if reschedule or IDFCs pending */
	asm("bne 1f ");							/* if so, branch to release lock */
	asm("mov r0, #0 ");						/* else return FALSE */
	__JUMP(,lr);

	asm("1: ");
	asm("stmfd sp!, {r0,lr} ");
	asm("bl " CSM_ZN11TRWSpinLock11UnlockOnlyWEv);
	asm("bl " CSM_ZN5NKern15PreemptionPointEv);
	asm("ldr r0, [sp], #4 ");
	asm("bl " CSM_ZN11TRWSpinLock9LockOnlyWEv);
	asm("mov r0, #1 ");
	asm("ldr pc, [sp], #4 ");
	}