Update contrib.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32\common\arm\atomic_64_v6k.h
// 64 bit atomic operations on V6K processors
//
#include "atomic_ops.h"

#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/)
    {
    // R0=a
    // return value in R1:R0
    asm("mov r2, r0 ");
    ENSURE_8BYTE_ALIGNMENT(2);
    asm("1: ");
    LDREXD(0,2);                // R1:R0 = oldv
    STREXD(3,0,2);              // try to write back, R3=0 if success
    asm("cmp r3, #0 ");
    asm("bne 1b ");             // failed - retry
    __LOCAL_DATA_MEMORY_BARRIER__(r3);
    __JUMP(,lr);
    }
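
// Illustrative note (not part of the original file): on ARMv6K a plain 64-bit
// load is not guaranteed to be single-copy atomic, so the routine above takes
// an LDREXD/STREXD snapshot and then issues a barrier for acquire ordering.
// A rough, hypothetical C++11 sketch of the intended semantics (std::uint64_t
// standing in for __TYPE__; the function name is made up for illustration):
//
//     #include <atomic>
//     #include <cstdint>
//
//     std::uint64_t load_acq64_sketch(const std::atomic<std::uint64_t>* a)
//         {
//         // Acquire load: later accesses cannot be reordered before it.
//         return a->load(std::memory_order_acquire);
//         }
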
#elif defined(__OP_STORE__)

#define __DO_STORE__ \
    asm("mov r12, r0 "); \

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/)
#ifdef __BARRIERS_NEEDED__      // If no barriers, just fall through to __e32_atomic_store_ord64
    // return value in R1:R0 equal to v
    ENSURE_8BYTE_ALIGNMENT(0);
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/)
    // return value in R1:R0 equal to v
    ENSURE_8BYTE_ALIGNMENT(0);
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
    __LOCAL_DATA_MEMORY_BARRIER__(r1);
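
// Illustrative note (not part of the original file): store_rel64 issues a
// barrier before the store so earlier accesses complete first (release
// semantics), while store_ord64 adds a second barrier after the store so the
// store is also ordered against later accesses. Both return v in R1:R0.
// A rough, hypothetical C++11 sketch (function names made up for illustration):
//
//     #include <atomic>
//     #include <cstdint>
//
//     std::uint64_t store_rel64_sketch(std::atomic<std::uint64_t>* a, std::uint64_t v)
//         {
//         a->store(v, std::memory_order_release);
//         return v;
//         }
//
//     std::uint64_t store_ord64_sketch(std::atomic<std::uint64_t>* a, std::uint64_t v)
//         {
//         a->store(v, std::memory_order_seq_cst);   // approximates "fully ordered"
//         return v;
//         }
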
#elif defined(__OP_RMW1__)

#ifdef __OP_SWP__
#define __SAVE_REGS__       asm("str r6, [sp, #-4]! ");
#define __RESTORE_REGS__    asm("ldr r6, [sp], #4 ");
#define __SOURCE_REG__      2
#define __DO_PROCESSING__
#else
#define __SAVE_REGS__       asm("stmfd sp!, {r4-r6} ");
#define __RESTORE_REGS__    asm("ldmfd sp!, {r4-r6} ");
#define __SOURCE_REG__      4
#if defined(__OP_ADD__)
#define __DO_PROCESSING__   asm("adds r4, r0, r2 ");    asm("adcs r5, r1, r3 ");
#elif defined(__OP_AND__)
#define __DO_PROCESSING__   asm("and r4, r0, r2 ");     asm("and r5, r1, r3 ");
#elif defined(__OP_IOR__)
#define __DO_PROCESSING__   asm("orr r4, r0, r2 ");     asm("orr r5, r1, r3 ");
#elif defined(__OP_XOR__)
#define __DO_PROCESSING__   asm("eor r4, r0, r2 ");     asm("eor r5, r1, r3 ");
#endif
#endif
#define __DO_RMW1_OP__          \
    asm("mov r12, r0 ");        \
    ENSURE_8BYTE_ALIGNMENT(0);  \
    asm("1: ");                 \
    LDREXD(0,12);               \
    __DO_PROCESSING__           \
    STREXD(6,__SOURCE_REG__,12);    \
    asm("cmp r6, #0 ");         \
    asm("bne 1b ");
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
    // return value in R1:R0
    __LOCAL_DATA_MEMORY_BARRIER__(r6);
#undef __RESTORE_REGS__
#undef __DO_RMW1_OP__
#undef __SOURCE_REG__
#undef __DO_PROCESSING__
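
// Illustrative note (not part of the original file): each RMW1 routine is an
// LDREXD / compute / STREXD retry loop that returns the previous value in
// R1:R0. The same shape can be written as a compare-exchange loop; a rough,
// hypothetical C++11 sketch for the add flavour (swp/and/ior/xor differ only
// in the computed new value, and the rlx/acq/rel/ord variants differ only in
// the memory ordering, shown here as acq_rel):
//
//     #include <atomic>
//     #include <cstdint>
//
//     std::uint64_t add64_sketch(std::atomic<std::uint64_t>* a, std::uint64_t v)
//         {
//         std::uint64_t oldv = a->load(std::memory_order_relaxed);
//         // Retry until no other writer intervenes, like the STREXD/bne loop.
//         while (!a->compare_exchange_weak(oldv, oldv + v,
//                                          std::memory_order_acq_rel,
//                                          std::memory_order_relaxed))
//             { }
//         return oldv;   // previous value
//         }
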
#elif defined(__OP_CAS__)

#define __SAVE_REGS__       asm("stmfd sp!, {r4-r7} ");
#define __RESTORE_REGS__    asm("ldmfd sp!, {r4-r7} ");
#define __DO_CAS_OP__           \
    asm("ldmia r1, {r6-r7} ");  \
    ENSURE_8BYTE_ALIGNMENT(0);  \
    asm("1: ");                 \
    LDREXD(4,0);                \
    asm("cmp r4, r6 ");         \
    asm("cmpeq r5, r7 ");       \
    asm("bne 2f ");             \
    STREXD(12,2,0);             \
    asm("cmp r12, #0 ");        \
    asm("bne 1b ");             \
    asm("2: ");                 \
    asm("stmneia r1, {r4-r5} ");    \
    asm("movne r0, #0 ");       \
    asm("moveq r0, #1 ");
extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
    // R0=a, R1=q, R3:R2=v
    // return value in R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
    // R0=a, R1=q, R3:R2=v
    // return value in R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
    // R0=a, R1=q, R3:R2=v
    // return value in R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
    // R0=a, R1=q, R3:R2=v
    // return value in R0
    __LOCAL_DATA_MEMORY_BARRIER__(r12);

#undef __RESTORE_REGS__
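
// Illustrative note (not part of the original file): the CAS routines return
// 1 (ETrue) and store v to *a if *a equalled *q; otherwise they return 0
// (EFalse) and copy the value actually observed back through q, which is what
// the stmneia above does. A rough, hypothetical C++11 sketch of that contract:
//
//     #include <atomic>
//     #include <cstdint>
//
//     bool cas64_sketch(std::atomic<std::uint64_t>* a,
//                       std::uint64_t* q, std::uint64_t v)
//         {
//         // On failure compare_exchange_strong writes the observed value to *q,
//         // matching the "store old value back through q" behaviour above.
//         return a->compare_exchange_strong(*q, v, std::memory_order_acq_rel);
//         }
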
#elif defined(__OP_AXO__)

#define __SAVE_REGS__       asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} ");
#define __RESTORE_REGS__    asm("ldmfd sp!, {r4-r8} ");

#define __SAVE_REGS__       asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} ");
#define __RESTORE_REGS__    asm("ldmfd sp!, {r4-r8,r12} ");
#define __DO_AXO_OP__           \
    asm("ldmia r1, {r4-r5} ");  \
    asm("mov r12, r0 ");        \
    ENSURE_8BYTE_ALIGNMENT(0);  \
    asm("1: ");                 \
    LDREXD(0,12);               \
    asm("and r6, r0, r2 ");     \
    asm("and r7, r1, r3 ");     \
    asm("eor r6, r6, r4 ");     \
    asm("eor r7, r7, r5 ");     \
    STREXD(8,6,12);             \
    asm("cmp r8, #0 ");         \
    asm("bne 1b ");
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
    // R0=a, R3:R2=u, [SP+4,0]=v
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
    // R0=a, R3:R2=u, [SP+4,0]=v
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
    // R0=a, R3:R2=u, [SP+4,0]=v
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
    // R0=a, R3:R2=u, [SP+4,0]=v
    // return value in R1:R0
    __LOCAL_DATA_MEMORY_BARRIER__(r8);
#undef __RESTORE_REGS__
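
// Illustrative note (not part of the original file): the AXO routines
// atomically replace *a with (*a & u) ^ v and return the previous value; the
// and/eor pairs above compute this on the low and high words separately.
// A rough, hypothetical C++11 sketch using a compare-exchange retry loop:
//
//     #include <atomic>
//     #include <cstdint>
//
//     std::uint64_t axo64_sketch(std::atomic<std::uint64_t>* a,
//                                std::uint64_t u, std::uint64_t v)
//         {
//         std::uint64_t oldv = a->load(std::memory_order_relaxed);
//         while (!a->compare_exchange_weak(oldv, (oldv & u) ^ v,
//                                          std::memory_order_acq_rel,
//                                          std::memory_order_relaxed))
//             { }
//         return oldv;
//         }
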
#elif defined(__OP_RMW3__)

#define __SAVE_REGS__       asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} ");
#define __RESTORE_REGS__    asm("ldmfd sp!, {r4-r10} ");

#define __SAVE_REGS__       asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} ");
#define __RESTORE_REGS__    asm("ldmfd sp!, {r4-r10,r12} ");
#if defined(__OP_TAU__)
#define __COND_GE__ "cs"
#define __COND_LT__ "cc"
#elif defined(__OP_TAS__)
#define __COND_GE__ "ge"
#define __COND_LT__ "lt"
#endif
#define __DO_RMW3_OP__          \
    asm("ldmia r1, {r4-r7} ");  \
    asm("mov r12, r0 ");        \
    ENSURE_8BYTE_ALIGNMENT(0);  \
    asm("1: ");                 \
    LDREXD(0,12);               \
    asm("subs r8, r0, r2 ");    \
    asm("sbcs r9, r1, r3 ");    \
    asm("mov" __COND_GE__ " r8, r4 ");  \
    asm("mov" __COND_GE__ " r9, r5 ");  \
    asm("mov" __COND_LT__ " r8, r6 ");  \
    asm("mov" __COND_LT__ " r9, r7 ");  \
    asm("adds r8, r8, r0 ");    \
    asm("adcs r9, r9, r1 ");    \
    STREXD(10,8,12);            \
    asm("cmp r10, #0 ");        \
    asm("bne 1b ");
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
    // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
    // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
    // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
    // return value in R1:R0
#ifdef __BARRIERS_NEEDED__      // If no barriers, all ordering variants collapse to same function
    __LOCAL_DATA_MEMORY_BARRIER_Z__(r12);

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
    // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
    // return value in R1:R0
    __LOCAL_DATA_MEMORY_BARRIER__(r10);
#undef __RESTORE_REGS__
#undef __DO_RMW3_OP__
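
// Illustrative note (not part of the original file): the RMW3 routines
// implement "threshold and add": if the old value is >= t then u is added,
// otherwise v is added, and the old value is returned. TAU compares unsigned
// (the cs/cc condition codes above), TAS compares signed (ge/lt). A rough,
// hypothetical C++11 sketch of the unsigned flavour:
//
//     #include <atomic>
//     #include <cstdint>
//
//     std::uint64_t tau64_sketch(std::atomic<std::uint64_t>* a, std::uint64_t t,
//                                std::uint64_t u, std::uint64_t v)
//         {
//         std::uint64_t oldv = a->load(std::memory_order_relaxed);
//         while (!a->compare_exchange_weak(oldv, oldv + (oldv >= t ? u : v),
//                                          std::memory_order_acq_rel,
//                                          std::memory_order_relaxed))
//             { }
//         return oldv;
//         }
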
// Second inclusion undefines temporaries
#include "atomic_ops.h"