sl@0: // Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). sl@0: // All rights reserved. sl@0: // This component and the accompanying materials are made available sl@0: // under the terms of the License "Eclipse Public License v1.0" sl@0: // which accompanies this distribution, and is available sl@0: // at the URL "http://www.eclipse.org/legal/epl-v10.html". sl@0: // sl@0: // Initial Contributors: sl@0: // Nokia Corporation - initial contribution. sl@0: // sl@0: // Contributors: sl@0: // sl@0: // Description: sl@0: // e32\common\arm\atomic_64_v6k.h sl@0: // 64 bit atomic operations on V6K processors sl@0: // sl@0: // sl@0: sl@0: #include "atomic_ops.h" sl@0: sl@0: #if defined(__OP_LOAD__) sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/) sl@0: { sl@0: // R0=a sl@0: // return value in R1:R0 sl@0: sl@0: asm("mov r2, r0 "); sl@0: ENSURE_8BYTE_ALIGNMENT(2); sl@0: asm("1: "); sl@0: LDREXD(0,2); // R1:R0 = oldv sl@0: STREXD(3,0,2); // try to write back, R3=0 if success sl@0: asm("cmp r3, #0 "); sl@0: asm("bne 1b "); // failed - retry sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r3); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: sl@0: sl@0: #elif defined(__OP_STORE__) sl@0: sl@0: #define __DO_STORE__ \ sl@0: asm("mov r12, r0 "); \ sl@0: asm("1: "); \ sl@0: LDREXD(0,12); \ sl@0: STREXD(1,2,12); \ sl@0: asm("cmp r1, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, just fall through to __e32_atomic_store_ord64 sl@0: #ifndef __EABI__ sl@0: asm("mov r3, r2 "); sl@0: asm("mov r2, r1 "); sl@0: #endif sl@0: // R0=a, R3:R2=v sl@0: // return value in R1:R0 equal to v sl@0: ENSURE_8BYTE_ALIGNMENT(0); sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: __DO_STORE__ sl@0: asm("mov r0, r2 "); sl@0: asm("mov r1, r3 "); sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R3:R2=v sl@0: // return value in R1:R0 equal to v sl@0: #ifndef __EABI__ sl@0: asm("mov r3, r2 "); sl@0: asm("mov r2, r1 "); sl@0: #endif sl@0: ENSURE_8BYTE_ALIGNMENT(0); sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: __DO_STORE__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r1); sl@0: asm("mov r0, r2 "); sl@0: asm("mov r1, r3 "); sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __DO_STORE__ sl@0: sl@0: sl@0: #elif defined(__OP_RMW1__) sl@0: sl@0: #ifdef __OP_SWP__ sl@0: #define __SAVE_REGS__ asm("str r6, [sp, #-4]! "); sl@0: #define __RESTORE_REGS__ asm("ldr r6, [sp], #4 "); sl@0: #define __SOURCE_REG__ 2 sl@0: #define __DO_PROCESSING__ sl@0: #else sl@0: #define __SAVE_REGS__ asm("stmfd sp!, {r4-r6} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r6} "); sl@0: #define __SOURCE_REG__ 4 sl@0: #if defined(__OP_ADD__) sl@0: #define __DO_PROCESSING__ asm("adds r4, r0, r2 "); asm("adcs r5, r1, r3 "); sl@0: #elif defined(__OP_AND__) sl@0: #define __DO_PROCESSING__ asm("and r4, r0, r2 "); asm("and r5, r1, r3 "); sl@0: #elif defined(__OP_IOR__) sl@0: #define __DO_PROCESSING__ asm("orr r4, r0, r2 "); asm("orr r5, r1, r3 "); sl@0: #elif defined(__OP_XOR__) sl@0: #define __DO_PROCESSING__ asm("eor r4, r0, r2 "); asm("eor r5, r1, r3 "); sl@0: #endif sl@0: #endif sl@0: sl@0: #define __DO_RMW1_OP__ \ sl@0: asm("mov r12, r0 "); \ sl@0: ENSURE_8BYTE_ALIGNMENT(0); \ sl@0: asm("1: "); \ sl@0: LDREXD(0,12); \ sl@0: __DO_PROCESSING__ \ sl@0: STREXD(6,__SOURCE_REG__,12); \ sl@0: asm("cmp r6, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R3:R2=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R3:R2=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: #ifndef __EABI__ sl@0: asm("mov r3, r2 "); sl@0: asm("mov r2, r1 "); sl@0: #endif sl@0: __SAVE_REGS__ sl@0: __DO_RMW1_OP__ sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R3:R2=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R3:R2=v sl@0: // return value in R1:R0 sl@0: #ifndef __EABI__ sl@0: asm("mov r3, r2 "); sl@0: asm("mov r2, r1 "); sl@0: #endif sl@0: __SAVE_REGS__ sl@0: __DO_RMW1_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r6); sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __SAVE_REGS__ sl@0: #undef __RESTORE_REGS__ sl@0: #undef __DO_RMW1_OP__ sl@0: #undef __SOURCE_REG__ sl@0: #undef __DO_PROCESSING__ sl@0: sl@0: sl@0: #elif defined(__OP_CAS__) sl@0: sl@0: #define __SAVE_REGS__ asm("stmfd sp!, {r4-r7} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r7} "); sl@0: sl@0: #define __DO_CAS_OP__ \ sl@0: asm("ldmia r1, {r6-r7} "); \ sl@0: ENSURE_8BYTE_ALIGNMENT(0); \ sl@0: asm("1: "); \ sl@0: LDREXD(4,0); \ sl@0: asm("cmp r4, r6 "); \ sl@0: asm("cmpeq r5, r7 "); \ sl@0: asm("bne 2f "); \ sl@0: STREXD(12,2,0); \ sl@0: asm("cmp r12, #0 "); \ sl@0: asm("bne 1b "); \ sl@0: asm("2: "); \ sl@0: asm("stmneia r1, {r4-r5} "); \ sl@0: asm("movne r0, #0 "); \ sl@0: asm("moveq r0, #1 "); sl@0: sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R3:R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R3:R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __SAVE_REGS__ sl@0: __DO_CAS_OP__ sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R3:R2=v sl@0: // return value in R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/) sl@0: { sl@0: // R0=a, R1=q, R3:R2=v sl@0: // return value in R0 sl@0: __SAVE_REGS__ sl@0: __DO_CAS_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r12); sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __SAVE_REGS__ sl@0: #undef __RESTORE_REGS__ sl@0: #undef __DO_CAS_OP__ sl@0: sl@0: sl@0: sl@0: #elif defined(__OP_AXO__) sl@0: sl@0: #ifdef __EABI__ sl@0: #define __SAVE_REGS__ asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r8} "); sl@0: #else sl@0: #define __SAVE_REGS__ asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r8,r12} "); sl@0: #endif sl@0: sl@0: #define __DO_AXO_OP__ \ sl@0: asm("ldmia r1, {r4-r5} "); \ sl@0: asm("mov r12, r0 "); \ sl@0: ENSURE_8BYTE_ALIGNMENT(0); \ sl@0: asm("1: "); \ sl@0: LDREXD(0,12); \ sl@0: asm("and r6, r0, r2 "); \ sl@0: asm("and r7, r1, r3 "); \ sl@0: asm("eor r6, r6, r4 "); \ sl@0: asm("eor r7, r7, r5 "); \ sl@0: STREXD(8,6,12); \ sl@0: asm("cmp r8, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=u, [SP+4,0]=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=u, [SP+4,0]=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __SAVE_REGS__ sl@0: __DO_AXO_OP__ sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=u, [SP+4,0]=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=u, [SP+4,0]=v sl@0: // return value in R1:R0 sl@0: __SAVE_REGS__ sl@0: __DO_AXO_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r8); sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __SAVE_REGS__ sl@0: #undef __RESTORE_REGS__ sl@0: #undef __DO_AXO_OP__ sl@0: sl@0: sl@0: #elif defined(__OP_RMW3__) sl@0: sl@0: #ifdef __EABI__ sl@0: #define __SAVE_REGS__ asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r10} "); sl@0: #else sl@0: #define __SAVE_REGS__ asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} "); sl@0: #define __RESTORE_REGS__ asm("ldmfd sp!, {r4-r10,r12} "); sl@0: #endif sl@0: sl@0: #if defined(__OP_TAU__) sl@0: #define __COND_GE__ "cs" sl@0: #define __COND_LT__ "cc" sl@0: #elif defined(__OP_TAS__) sl@0: #define __COND_GE__ "ge" sl@0: #define __COND_LT__ "lt" sl@0: #endif sl@0: sl@0: #define __DO_RMW3_OP__ \ sl@0: asm("ldmia r1, {r4-r7} "); \ sl@0: asm("mov r12, r0 "); \ sl@0: ENSURE_8BYTE_ALIGNMENT(0); \ sl@0: asm("1: "); \ sl@0: LDREXD(0,12); \ sl@0: asm("subs r8, r0, r2 "); \ sl@0: asm("sbcs r9, r1, r3 "); \ sl@0: asm("mov" __COND_GE__ " r8, r4 "); \ sl@0: asm("mov" __COND_GE__ " r9, r5 "); \ sl@0: asm("mov" __COND_LT__ " r8, r6 "); \ sl@0: asm("mov" __COND_LT__ " r9, r7 "); \ sl@0: asm("adds r8, r8, r0 "); \ sl@0: asm("adcs r9, r9, r1 "); \ sl@0: STREXD(10,8,12); \ sl@0: asm("cmp r10, #0 "); \ sl@0: asm("bne 1b "); sl@0: sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __SAVE_REGS__ sl@0: __DO_RMW3_OP__ sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v sl@0: // return value in R1:R0 sl@0: #ifdef __BARRIERS_NEEDED__ // If no barriers, all ordering variants collapse to same function sl@0: __LOCAL_DATA_MEMORY_BARRIER_Z__(r12); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __EABI__ sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/) sl@0: #else sl@0: extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int) sl@0: #endif sl@0: { sl@0: // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v sl@0: // return value in R1:R0 sl@0: __SAVE_REGS__ sl@0: __DO_RMW3_OP__ sl@0: __LOCAL_DATA_MEMORY_BARRIER__(r10); sl@0: __RESTORE_REGS__ sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: #undef __SAVE_REGS__ sl@0: #undef __RESTORE_REGS__ sl@0: #undef __DO_RMW3_OP__ sl@0: #undef __COND_GE__ sl@0: #undef __COND_LT__ sl@0: sl@0: sl@0: #endif sl@0: sl@0: // Second inclusion undefines temporaries sl@0: #include "atomic_ops.h"