sl@0: // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\common\arm\atomic_32_v6.h sl@0: // 32 bit atomic operations on V6 and V6K processors sl@0: // Also 8 and 16 bit atomic operations on V6K processors sl@0: // Also 8, 16 and 32 bit load/store on all processors sl@0: // sl@0: // sl@0: sl@0: #include "atomic_ops.h" sl@0: sl@0: #if defined(__OP_LOAD__) sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(const volatile TAny* /*a*/) sl@0: { sl@0: // R0=a sl@0: // return value in R0 sl@0: sl@0: __LDR_INST__( ," r0, [r0] "); sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r1); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: sl@0: #elif defined(__OP_STORE__) sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: #ifdef __BARRIERS_NEEDED__ sl@0: // R0=a, R1=v sl@0: // return value in R0 equal to v sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: __STR_INST__( ," r1, [r0] "); sl@0: asm("mov r0, r1 "); sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=v sl@0: // return value in R0 equal to v sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: __STR_INST__( ," r1, [r0] "); sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r12); sl@0: asm("mov r0, r1 "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: #elif defined(__OP_RMW1__) sl@0: sl@0: #ifdef __OP_SWP__ sl@0: #define __SOURCE_REG__ 1 sl@0: #define __DO_PROCESSING__ sl@0: #else sl@0: #define __SOURCE_REG__ 2 sl@0: #if defined(__OP_ADD__) sl@0: #define __DO_PROCESSING__ asm("add r2, r0, r1 "); sl@0: #elif defined(__OP_AND__) sl@0: #define __DO_PROCESSING__ asm("and r2, r0, r1 "); sl@0: #elif defined(__OP_IOR__) sl@0: #define __DO_PROCESSING__ asm("orr r2, r0, r1 "); sl@0: #elif defined(__OP_XOR__) sl@0: #define __DO_PROCESSING__ asm("eor r2, r0, r1 "); sl@0: #endif sl@0: #endif sl@0: sl@0: #define __DO_RMW1_OP__ \ sl@0: asm("mov r12, r0 "); \ sl@0: asm("1: "); \ sl@0: __LDREX_INST__(0,12); \ sl@0: __DO_PROCESSING__ \ sl@0: __STREX_INST__(3,__SOURCE_REG__,12); \ sl@0: asm("cmp r3, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __DO_RMW1_OP__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=v sl@0: // return value in R0 sl@0: __DO_RMW1_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r3); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __DO_RMW1_OP__ sl@0: #undef __SOURCE_REG__ sl@0: #undef __DO_PROCESSING__ sl@0: sl@0: sl@0: #elif defined(__OP_CAS__) sl@0: sl@0: #define __DO_CAS_OP__ \ sl@0: __LDR_INST__( ," r12, [r1] "); \ sl@0: asm("1: "); \ sl@0: __LDREX_INST__(3,0); \ sl@0: asm("cmp r3, r12 "); \ sl@0: asm("bne 2f "); \ sl@0: __STREX_INST__(3,2,0); \ sl@0: asm("cmp r3, #0 "); \ sl@0: asm("bne 1b "); \ sl@0: asm("2: "); \ sl@0: __STR_INST__(ne, "r3, [r1] "); \ sl@0: asm("movne r0, #0 "); \ sl@0: asm("moveq r0, #1 "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __DO_CAS_OP__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R2=v sl@0: // return value in R0 sl@0: __DO_CAS_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r3); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __DO_CAS_OP__ sl@0: sl@0: sl@0: sl@0: #elif defined(__OP_AXO__) sl@0: sl@0: #define __SAVE_REGS__ asm("str r4, [sp, #-4]! "); sl@0: #define __RESTORE_REGS__ asm("ldr r4, [sp], #4 "); sl@0: sl@0: #define __DO_AXO_OP__ \ sl@0: asm("mov r12, r0 "); \ sl@0: asm("1: "); \ sl@0: __LDREX_INST__(0,12); \ sl@0: asm("and r4, r0, r1 "); \ sl@0: asm("eor r4, r4, r2 "); \ sl@0: __STREX_INST__(3,4,12); \ sl@0: asm("cmp r3, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=u, R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=u, R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __SAVE_REGS__ sl@0: __DO_AXO_OP__ sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=u, R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=u, R2=v sl@0: // return value in R0 sl@0: __SAVE_REGS__ sl@0: __DO_AXO_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r3); sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __SAVE_REGS__ sl@0: #undef __RESTORE_REGS__ sl@0: #undef __DO_AXO_OP__ sl@0: sl@0: sl@0: #elif defined(__OP_RMW3__) sl@0: sl@0: #define __SAVE_REGS__ asm("stmfd sp!, {r4-r5} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r5} "); sl@0: sl@0: #if defined(__OP_TAU__) sl@0: #define __COND_GE__ "cs" sl@0: #define __COND_LT__ "cc" sl@0: #define __DO_SIGN_EXTEND__ sl@0: #elif defined(__OP_TAS__) sl@0: #define __COND_GE__ "ge" sl@0: #define __COND_LT__ "lt" sl@0: #define __DO_SIGN_EXTEND__ __SIGN_EXTEND__(r0) sl@0: #endif sl@0: sl@0: #define __DO_RMW3_OP__ \ sl@0: asm("mov r12, r0 "); \ sl@0: asm("1: "); \ sl@0: __LDREX_INST__(0,12); \ sl@0: __DO_SIGN_EXTEND__ \ sl@0: asm("cmp r0, r1 "); \ sl@0: asm("add" __COND_GE__ " r4, r0, r2 "); \ sl@0: asm("add" __COND_LT__ " r4, r0, r3 "); \ sl@0: __STREX_INST__(5,4,12); \ sl@0: asm("cmp r5, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=t, R2=u, R3=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=t, R2=u, R3=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __SAVE_REGS__ sl@0: __DO_RMW3_OP__ sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=t, R2=u, R3=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=t, R2=u, R3=v sl@0: // return value in R0 sl@0: __SAVE_REGS__ sl@0: __DO_RMW3_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r5); sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __SAVE_REGS__ sl@0: #undef __RESTORE_REGS__ sl@0: #undef __DO_RMW3_OP__ sl@0: #undef __COND_GE__ sl@0: #undef __COND_LT__ sl@0: #undef __DO_SIGN_EXTEND__ sl@0: sl@0: sl@0: #endif sl@0: sl@0: // Second inclusion undefines temporaries sl@0: #include "atomic_ops.h"