--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/common/arm/atomic_8_16_32_exec.h Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,232 @@
+// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\common\arm\atomic_8_16_32_exec.h
+// 8-, 16- and 32-bit user-side atomic operations for V5 processors.
+// These work by making fast exec calls into the kernel.
+//
+//
+
+#include "atomic_ops.h"
+
+#ifdef __CPU_ARMV6
+#error 8/16/32 bit atomic operations on ARMv6 must use LDREX/STREX
+#else
+// Write paging is not supported, so atomics can assume unpaged memory, which makes a FAST exec call safe.
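+// Each operation below marshals its arguments into a parameter block on the
+// caller's stack and passes the block's address to the kernel in R0.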
+
+#if __DATA_SIZE__==8
+#define __ATOMIC_EXEC__(op) FAST_EXEC1_NR(EFastExecFastAtomic##op##8)
+#elif __DATA_SIZE__==16
+#define __ATOMIC_EXEC__(op) FAST_EXEC1_NR(EFastExecFastAtomic##op##16)
+#elif __DATA_SIZE__==32
+#define __ATOMIC_EXEC__(op) FAST_EXEC1_NR(EFastExecFastAtomic##op##32)
+#else
+#error Invalid data size
+#endif
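+// For example, with __DATA_SIZE__==32, __ATOMIC_EXEC__(Add) expands to
+// FAST_EXEC1_NR(EFastExecFastAtomicAdd32); the callers below place the
+// address of the stack-based parameter block in R0 before invoking it.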
+
+#endif
+
+#ifdef __BARRIERS_NEEDED__
+#error Barriers not supported on V6/V5, only V6K/V7
+#endif
+
+#if defined(__OP_LOAD__)
+#error Load operation not defined
+
+#elif defined(__OP_STORE__)
+#error Store operation not defined
+
+#elif defined(__OP_RMW1__)
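+// V5 has no memory barriers (see above), so all four ordering variants are
+// identical: the rel, rlx and ord flavours are empty __NAKED__ functions
+// that simply fall through into the acq body, which does the real work.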
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=v
+ // return value in R0
+#if defined(__OP_ADD__)
+ asm("stmfd sp!, {r0-r1} "); // iA=a, i0=v, i1, i2 unused
+ asm("mov r0, sp ");
+ __ATOMIC_EXEC__(Add);
+ asm("add sp, sp, #8 ");
+#else
+#if defined(__OP_SWP__)
+ asm("mov r2, r1 "); // XOR mask = v
+ asm("mov r1, #0 "); // AND mask = 0
+#elif defined(__OP_AND__)
+ asm("mov r2, #0 "); // AND mask = v, XOR mask = 0
+#elif defined(__OP_IOR__)
+ asm("mov r2, r1 "); // XOR mask = v
+ asm("mvn r1, r1 "); // AND mask = ~v
+#elif defined(__OP_XOR__)
+ asm("mov r2, r1 "); // XOR mask = v
+ asm("mvn r1, #0 "); // AND mask = 0xFFFFFFFF
+#endif
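+ // The Axo exec atomically computes oldv = *a; *a = (oldv & AND_mask) ^ XOR_mask;
+ // and returns oldv, so the mask choices above express SWP, AND, IOR and
+ // XOR as special cases of this single and-then-xor primitive.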
1.96 + asm("stmfd sp!, {r0-r2} "); // iA=a, i0=AND mask, i1=XOR mask, i2 unused
1.97 + asm("mov r0, sp ");
1.98 + __ATOMIC_EXEC__(Axo);
1.99 + asm("add sp, sp, #12 ");
1.100 +#endif
1.101 + __JUMP(,lr);
1.102 + }
1.103 +
1.104 +
1.105 +#elif defined(__OP_CAS__)
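+// Compare-and-store, as defined in e32atomics.h: atomically,
+// if (*a == *q) { *a = v; return TRUE; } else { *q = *a; return FALSE; }
+// i.e. on failure q is updated with the value observed at a.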
+extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=q, R2=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=q, R2=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=q, R2=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=q, R2=v
+ // return value in R0
+ asm("stmfd sp!, {r0-r2} "); // iA=a, iQ=q, i1=v, i2 unused
+ asm("mov r0, sp ");
+ __ATOMIC_EXEC__(Cas);
+ asm("add sp, sp, #12 ");
+ __JUMP(,lr);
+ }
+
+
+#elif defined(__OP_AXO__)
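+// Axo = and-then-xor, as defined in e32atomics.h: atomically
+// oldv = *a; *a = (oldv & u) ^ v; and oldv is returned.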
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=u, R2=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=u, R2=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=u, R2=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=u, R2=v
+ // return value in R0
+ asm("stmfd sp!, {r0-r2} "); // iA=a, i0=u, i1=v, i2 unused
+ asm("mov r0, sp ");
+ __ATOMIC_EXEC__(Axo);
+ asm("add sp, sp, #12 ");
+ __JUMP(,lr);
+ }
+
+
+#elif defined(__OP_RMW3__)
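+// RMW3 operations are conditional adds, as defined in e32atomics.h: atomically
+// oldv = *a; *a = oldv + ((oldv >= t) ? u : v); and oldv is returned.
+// Tau compares oldv against the threshold t unsigned, Tas signed.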
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=t, R2=u, R3=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=t, R2=u, R3=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=t, R2=u, R3=v
+ // return value in R0
+ // just fall through
+ }
+
+extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
+ {
+ // R0=a, R1=t, R2=u, R3=v
+ // return value in R0
+ asm("stmfd sp!, {r0-r3} "); // iA=a, i0=t, i1=u, i2=v
+ asm("mov r0, sp ");
+#if defined(__OP_TAU__)
+ __ATOMIC_EXEC__(Tau);
+#elif defined(__OP_TAS__)
+ __ATOMIC_EXEC__(Tas);
+#endif
+ asm("add sp, sp, #16 ");
+ __JUMP(,lr);
+ }
+
+
+#endif
+
+#undef __ATOMIC_EXEC__
+
+// Second inclusion undefines temporaries
+#include "atomic_ops.h"