Update contrib.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\common\arm\atomic_64_exec.h
// User-side 64 bit atomic operations on V6 or V5 processors using Exec calls
// WARNING: GCC98r2 doesn't align registers so 'v' ends up in R2:R1 not R3:R2
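// (For example, for an argument pair (a, v) with 64-bit 'v': an EABI-conforming
// compiler passes 'v' in the aligned register pair R3:R2, whereas GCC98r2 packs
// it into R2:R1; this is presumably why the RMW2/RMW3 cases below provide two
// declarations and two argument-marshalling sequences.)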
#include "atomic_ops.h"

// Write paging supported, so atomics must work on paged memory, so SLOW exec needed
#define __ATOMIC64_EXEC__(op) SLOW_EXEC1_NR(EExecSlowAtomic##op##64)
// Write paging not supported, so atomics can assume unpaged memory, so FAST exec OK
#define __ATOMIC64_EXEC__(op) FAST_EXEC1_NR(EFastExecFastAtomic##op##64)
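// These are the two alternative expansions of __ATOMIC64_EXEC__, selected by the
// build's data-paging configuration; for example __ATOMIC64_EXEC__(Add) becomes
// SLOW_EXEC1_NR(EExecSlowAtomicAdd64) in the paged case and
// FAST_EXEC1_NR(EFastExecFastAtomicAdd64) otherwise.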
#ifdef __BARRIERS_NEEDED__
#error Barriers not supported on V6/V5, only V6K/V7
#if defined(__OP_LOAD__)
#error LOAD same as kernel side
#elif defined(__OP_STORE__)
#error STORE same as kernel side
#elif defined(__OP_RMW1__)
41 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
44 // return value in R1:R0
45 // just fall through to __e32_atomic_*_acq64
48 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
51 // return value in R1:R0
52 // just fall through to __e32_atomic_*_acq64
55 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
58 // return value in R1:R0
59 // just fall through to __e32_atomic_*_acq64
62 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
65 // return value in R1:R0
71 ENSURE_8BYTE_ALIGNMENT(0);
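// Each case below marshals an argument block onto the stack (iA = target address
// first, then the operand words named in the comments) and hands it to the exec
// call; the handler presumably writes the original 64-bit value back into the
// first two words of the block, which is why "ldmia sp!, {r0-r1}" below yields
// the return value in R1:R0.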
#if defined(__OP_SWP__)
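// Swap is expressed through the AND-XOR exec: with AND mask 0 and XOR mask v the
// value stored is (old & 0) ^ v == v, and the old value is returned.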
75 asm("stmfd sp!, {r2-r3} "); // i2 = XOR mask = v
76 asm("stmfd sp!, {r1,r12} "); // i1 = AND mask = 0
77 asm("stmfd sp!, {r0-r1} "); // iA = a
79 __ATOMIC64_EXEC__(Axo);
80 asm("ldmia sp!, {r0-r1} ");
81 asm("add sp, sp, #16 ");
#elif defined(__OP_ADD__)
asm("stmfd sp!, {r0-r3} ");
__ATOMIC64_EXEC__(Add);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #8 ");
#elif defined(__OP_AND__)
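// AND uses the same AND-XOR exec with XOR mask 0: (old & v) ^ 0 == old & v.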
91 asm("stmfd sp!, {r1,r12} "); // i2 = XOR mask = 0
92 asm("stmfd sp!, {r0-r3} "); // i1 = AND mask = v, iA=a
94 __ATOMIC64_EXEC__(Axo);
95 asm("ldmia sp!, {r0-r1} ");
96 asm("add sp, sp, #16 ");
97 #elif defined(__OP_IOR__)
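// OR is also mapped onto the AND-XOR exec: (old & ~v) ^ v == old | v, so the
// AND mask is ~v and the XOR mask is v.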
98 asm("mvn r1, r2 "); // r12:r1 = ~r3:r2
100 asm("stmfd sp!, {r2-r3} "); // i2 = XOR mask = v
101 asm("stmfd sp!, {r1,r12} "); // i1 = AND mask = ~v
102 asm("stmfd sp!, {r0-r1} "); // iA = a
104 __ATOMIC64_EXEC__(Axo);
105 asm("ldmia sp!, {r0-r1} ");
106 asm("add sp, sp, #16 ");
107 #elif defined(__OP_XOR__)
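// XOR uses an all-ones AND mask: (old & 0xFFFFFFFFFFFFFFFF) ^ v == old ^ v.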
110 asm("stmfd sp!, {r2-r3} "); // i2 = XOR mask = v
111 asm("stmfd sp!, {r1,r12} "); // i1 = AND mask = 0xFFFFFFFFFFFFFFFF
112 asm("stmfd sp!, {r0-r1} "); // iA = a
114 __ATOMIC64_EXEC__(Axo);
115 asm("ldmia sp!, {r0-r1} ");
116 asm("add sp, sp, #16 ");
123 #elif defined(__OP_CAS__)
125 extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
127 // R0=a, R1=q, R3:R2=v
128 // return value in R0
129 // just fall through to __e32_atomic_*_acq64
132 extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
134 // R0=a, R1=q, R3:R2=v
135 // return value in R0
136 // just fall through to __e32_atomic_*_acq64
139 extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
141 // R0=a, R1=q, R3:R2=v
142 // return value in R0
143 // just fall through to __e32_atomic_*_acq64
146 extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
148 // R0=a, R1=q, R3:R2=v
149 // return value in R0
150 ENSURE_8BYTE_ALIGNMENT(0);
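// The Cas exec call takes iA=a, iQ=q, i1=v and is expected to implement the
// usual __e32_atomic_cas semantics:
// if (*a == *q) { *a = v; return TRUE; } else { *q = *a; return FALSE; }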
151 asm("stmfd sp!, {r0-r3} "); // iA=a, iQ=q, i1=v
153 __ATOMIC64_EXEC__(Cas); // returns result in R0
154 asm("add sp, sp, #16 ");
159 #elif defined(__OP_AXO__)
162 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
164 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
167 // R0=a, R3:R2=u, [SP+4,0]=v
168 // return value in R1:R0
169 // just fall through to __e32_atomic_*_acq64
173 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
175 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
178 // R0=a, R3:R2=u, [SP+4,0]=v
179 // return value in R1:R0
180 // just fall through to __e32_atomic_*_acq64
184 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
186 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
189 // R0=a, R3:R2=u, [SP+4,0]=v
190 // return value in R1:R0
191 // just fall through to __e32_atomic_*_acq64
195 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
197 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
200 // R0=a, R3:R2=u, [SP+4,0]=v
201 // return value in R1:R0
202 ENSURE_8BYTE_ALIGNMENT(0);
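// The Axo operation is: old = *a; *a = (old & u) ^ v; return old. Depending on
// which calling convention placed 'u' and 'v' (see the WARNING above), the
// argument block [iA, pad, i1 = AND mask u, i2 = XOR mask v] is assembled by one
// of the two stmfd sequences below, and the matching amount of stack is released
// afterwards.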
// i2 = XOR mask = v already on stack
asm("stmfd sp!, {r0-r3} "); // i1 = AND mask = u, iA = a
asm("stmfd sp!, {r1-r3} "); // i1 = AND mask = u, i2 = XOR mask = v (high word already on stack)
asm("stmfd sp!, {r0-r1} "); // iA = a, dummy word for i0 (unused)
__ATOMIC64_EXEC__(Axo);
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #8 ");
asm("add sp, sp, #12 ");
#elif defined(__OP_RMW3__)
225 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
227 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
230 // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
231 // return value in R1:R0
232 // just fall through to __e32_atomic_*_acq64
236 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
238 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
241 // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
242 // return value in R1:R0
243 // just fall through to __e32_atomic_*_acq64
247 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
249 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
252 // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
253 // return value in R1:R0
254 // just fall through to __e32_atomic_*_acq64
258 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
260 extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
263 // R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
264 // return value in R1:R0
265 ENSURE_8BYTE_ALIGNMENT(0);
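// Tau/Tas are the 64-bit "threshold and add" operations:
// old = *a; if (old >= t) *a = old + u; else *a = old + v; return old;
// Tau presumably uses an unsigned comparison and Tas a signed one, matching the
// 32-bit __e32_atomic_tau/__e32_atomic_tas API.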
// i3 = v already on stack
// i2 = u already on stack
asm("stmfd sp!, {r0-r3} "); // i1 = t, iA = a
// v and high word of u already on stack
asm("stmfd sp!, {r1-r3} "); // i1 = t, i2 = u (high word already on stack)
asm("stmfd sp!, {r0-r1} "); // iA = a, dummy word for i0 (unused)
#if defined(__OP_TAU__)
__ATOMIC64_EXEC__(Tau);
#elif defined(__OP_TAS__)
__ATOMIC64_EXEC__(Tas);
#endif
asm("ldmia sp!, {r0-r1} ");
asm("add sp, sp, #8 ");
asm("add sp, sp, #12 ");
// Second inclusion undefines temporaries
#include "atomic_ops.h"