Update contrib.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_64_v6_v5.h
// Kernel-side 64 bit atomic operations on V6 or V5 processors
// WARNING: GCC98r2 doesn't align registers so 'v' ends up in R2:R1 not R3:R2
//
#include "atomic_ops.h"
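// atomic_ops.h acts as a macro template header: each inclusion of this file
// is made with __TYPE__, __OPERATION__, __fname__ and exactly one __OP_*__
// selector defined, so only one of the #if branches below is compiled per
// pass. A sketch of the name expansion, assuming the add operation is being
// instantiated (per the fall-through comments further down):
//
//   __fname__(__OPERATION__,acq,64)   ->   __e32_atomic_add_acq64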
#ifdef __BARRIERS_NEEDED__
#error Barriers not supported on V6/V5, only V6K/V7
#endif
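// With no barrier instructions available on V6/V5, acquire/release/ordered
// impose nothing beyond relaxed here; each rel/rlx/ord entry point below
// therefore falls straight through to its acq sibling.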
#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/)
	{
	// R0=a
	// return value in R1:R0
	ENSURE_8BYTE_ALIGNMENT(0);
	asm("ldmia r0, {r0-r1} ");
	__JUMP(,lr);
	}
#elif defined(__OP_STORE__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	// just fall through to __e32_atomic_store_ord64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	ENSURE_8BYTE_ALIGNMENT(0);
#ifdef __EABI__
	asm("stmia r0, {r2-r3} ");
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
#else
	asm("stmia r0, {r1-r2} ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	__JUMP(,lr);
	}
#elif defined(__OP_RMW1__)

#if defined(__OP_SWP__)
#define __DO_PROCESSING__
#elif defined(__OP_ADD__)
#define __DO_PROCESSING__ asm("adds r2, r2, r0 "); asm("adcs r3, r3, r1 ");
#elif defined(__OP_AND__)
#define __DO_PROCESSING__ asm("and r2, r2, r0 "); asm("and r3, r3, r1 ");
#elif defined(__OP_IOR__)
#define __DO_PROCESSING__ asm("orr r2, r2, r0 "); asm("orr r3, r3, r1 ");
#elif defined(__OP_XOR__)
#define __DO_PROCESSING__ asm("eor r2, r2, r0 "); asm("eor r3, r3, r1 ");
#endif
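// __DO_PROCESSING__ executes with the old value in R1:R0 and the operand v in
// R3:R2, and must leave the new value in R3:R2 ready for the stmia below.
// The add variant is a standard 64-bit carry chain; e.g. with old value
// 0x00000000FFFFFFFF and v=1:
//
//   adds r2, r2, r0   // low word:  1 + 0xFFFFFFFF = 0, carry out set
//   adcs r3, r3, r1   // high word: 0 + 0 + carry  = 1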
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	asm("mov r12, r0 ");
#ifndef __EABI__
	asm("mov r3, r2 ");	// GCC98r2 passes v in R2:R1 (see warning above)
	asm("mov r2, r1 ");
#endif
	ENSURE_8BYTE_ALIGNMENT(12);
	asm("str r4, [sp, #-4]! ");
	__DISABLE_INTERRUPTS__(r4,r1);
	asm("ldmia r12, {r0-r1} ");
	__DO_PROCESSING__
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r4);
	asm("ldr r4, [sp], #4 ");
	__JUMP(,lr);
	}
#undef __DO_PROCESSING__
#elif defined(__OP_CAS__)

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
150 asm("stmfd sp!, {r4-r7} ");
151 asm("ldmia r1, {r4-r5} ");
152 ENSURE_8BYTE_ALIGNMENT(0);
153 __DISABLE_INTERRUPTS__(r12,r6);
154 asm("ldmia r0, {r6-r7} ");
156 asm("cmpeq r7, r5 ");
157 asm("stmeqia r0, {r2-r3} ");
158 __RESTORE_INTERRUPTS__(r12); // flags preserved
159 asm("stmneia r1, {r6-r7} ");
160 asm("ldmfd sp!, {r4-r7} ");
161 asm("moveq r0, #1 ");
162 asm("movne r0, #0 ");
#elif defined(__OP_AXO__)
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
211 asm("stmfd sp!, {r4-r6} ");
213 asm("ldmia r1, {r4-r5} ");
215 asm("stmfd sp!, {r4-r6} ");
220 asm("ldr r5, [sp, #12] ");
222 ENSURE_8BYTE_ALIGNMENT(12);
223 __DISABLE_INTERRUPTS__(r6,r0);
224 asm("ldmia r12, {r0-r1} ");
225 asm("and r2, r2, r0 ");
226 asm("and r3, r3, r1 ");
227 asm("eor r2, r2, r4 ");
228 asm("eor r3, r3, r5 ");
229 asm("stmia r12, {r2-r3} ");
230 __RESTORE_INTERRUPTS__(r6); // flags preserved
231 asm("ldmfd sp!, {r4-r6} ");
#elif defined(__OP_RMW3__)
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
280 asm("stmfd sp!, {r4-r8} ");
282 asm("ldmia r1, {r4-r7} ");
285 asm("stmfd sp!, {r4-r8} ");
289 asm("ldmia r12, {r5-r7} ");
292 ENSURE_8BYTE_ALIGNMENT(12);
293 __DISABLE_INTERRUPTS__(r8,r0);
294 asm("ldmia r12, {r0-r1} ");
295 asm("subs r2, r0, r2 ");
296 asm("sbcs r3, r1, r3 ");
297 #if defined(__OP_TAU__)
298 asm("movcc r4, r6 ");
299 asm("movcc r5, r7 ");
300 #elif defined(__OP_TAS__)
301 asm("movlt r4, r6 ");
302 asm("movlt r5, r7 ");
304 asm("adds r2, r0, r4 ");
305 asm("adcs r3, r1, r5 ");
306 asm("stmia r12, {r2-r3} ");
307 __RESTORE_INTERRUPTS__(r8); // flags preserved
308 asm("ldmfd sp!, {r4-r8} ");
// Second inclusion undefines temporaries
#include "atomic_ops.h"