// os/kernelhwsrv/kernel/eka/common/arm/atomic_64_v6_v5.h
// changeset 0 bde4ae8d615e
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_64_v6_v5.h
// Kernel-side 64 bit atomic operations on V6 or V5 processors
// WARNING: GCC98r2 doesn't align 64-bit arguments to even register pairs, so 'v' ends up in R2:R1 rather than R3:R2
//
//

#include "atomic_ops.h"

#ifdef __BARRIERS_NEEDED__
#error Barriers not supported on V6/V5, only V6K/V7
#endif
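
// None of the routines in this file can use LDREXD/STREXD (those arrive only
// with V6K/V7), so wherever a genuine read-modify-write is needed the code
// masks interrupts around a plain load/modify/store of the two 32-bit halves.
// A hedged C++ model of that frame is sketched here for reference only;
// SketchDisableInterrupts()/SketchRestoreInterrupts() are hypothetical
// stand-ins for the __DISABLE_INTERRUPTS__/__RESTORE_INTERRUPTS__ macros, and
// TUint64 stands in for __TYPE__.
#if 0	// illustrative sketch, not compiled
static TUint64 SketchInterruptMaskedRmw64(volatile TUint64* a, TUint64 aNewValue)
	{
	TInt irq = SketchDisableInterrupts();	// hypothetical helper
	TUint64 orig = *a;			// LDMIA of the two 32-bit words
	*a = aNewValue;				// STMIA of the replacement value
	SketchRestoreInterrupts(irq);		// nothing else could observe the window
	return orig;				// the RMW routines all return the original value
	}
#endif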

#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/)
	{
	// R0=a
	// return value in R1:R0
	ENSURE_8BYTE_ALIGNMENT(0);
	asm("ldmia r0, {r0-r1} ");
	__JUMP(,lr);
	}

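
// The load needs no interrupt masking: the routine relies on a double-word
// LDMIA of an 8-byte-aligned location being atomic with respect to interrupts
// on these single-CPU targets, and acquire ordering costs nothing because no
// barriers exist on V5/V6. Hedged sketch of the semantics (TUint64 standing in
// for __TYPE__):
#if 0	// illustrative sketch, not compiled
static TUint64 SketchLoadAcq64(const volatile TUint64* a)
	{
	return *a;	// one indivisible 64-bit read of *a
	}
#endif
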
#elif defined(__OP_STORE__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	// just fall through to __e32_atomic_store_ord64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	ENSURE_8BYTE_ALIGNMENT(0);
#ifdef __EABI__
	asm("stmia r0, {r2-r3} ");
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
#else
	// GCC98r2: v arrived in R2:R1 (see warning above)
	asm("stmia r0, {r1-r2} ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	__JUMP(,lr);
	}

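
// The store side relies on the same property: a single STMIA of an
// 8-byte-aligned location, so no interrupt masking is required, and the
// function's contract is simply to return the value it stored. Hedged sketch
// (TUint64 standing in for __TYPE__):
#if 0	// illustrative sketch, not compiled
static TUint64 SketchStoreOrd64(volatile TUint64* a, TUint64 v)
	{
	*a = v;		// one indivisible 64-bit write
	return v;	// return value in R1:R0 equals v, as documented above
	}
#endif
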
#elif defined(__OP_RMW1__)

#if defined(__OP_SWP__)
#define	__DO_PROCESSING__
#elif defined(__OP_ADD__)
#define	__DO_PROCESSING__	asm("adds r2, r2, r0 ");	asm("adcs r3, r3, r1 ");
#elif defined(__OP_AND__)
#define	__DO_PROCESSING__	asm("and r2, r2, r0 ");		asm("and r3, r3, r1 ");
#elif defined(__OP_IOR__)
#define	__DO_PROCESSING__	asm("orr r2, r2, r0 ");		asm("orr r3, r3, r1 ");
#elif defined(__OP_XOR__)
#define	__DO_PROCESSING__	asm("eor r2, r2, r0 ");		asm("eor r3, r3, r1 ");
#endif

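
// Each __DO_PROCESSING__ variant combines the original value (R1:R0) with the
// operand (R3:R2) one 32-bit half at a time. The bitwise forms act on each half
// independently; only __OP_ADD__ needs to carry between halves, which is what
// the ADDS/ADCS pair does. Hedged C++ model of that carry propagation (names
// are invented for this sketch):
#if 0	// illustrative sketch, not compiled
static void SketchAdd64ByHalves(TUint32& aAccLow, TUint32& aAccHigh, TUint32 aLow, TUint32 aHigh)
	{
	TUint32 sumLow = aAccLow + aLow;		// ADDS: low words, carry out on overflow
	TUint32 carry = (sumLow < aAccLow) ? 1u : 0u;	// recover the carry in C++
	aAccHigh = aAccHigh + aHigh + carry;		// ADCS: high words plus carry in
	aAccLow = sumLow;
	}
#endif
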
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0

#ifndef __EABI__
	// GCC98r2: v arrived in R2:R1, move it up to R3:R2
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	asm("mov r12, r0 ");
	ENSURE_8BYTE_ALIGNMENT(12);
	asm("str r4, [sp, #-4]! ");
	__DISABLE_INTERRUPTS__(r4,r1);
	asm("ldmia r12, {r0-r1} ");
	__DO_PROCESSING__
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r4);
	asm("ldr r4, [sp], #4 ");
	__JUMP(,lr);
	}

#undef __DO_PROCESSING__
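
// The rel/rlx/ord/acq entry points above are deliberately identical: with no
// barriers available on V5/V6 every memory-ordering variant collapses onto the
// same interrupt-masked sequence, so the first three simply fall through to the
// last. Hedged sketch of the value-level semantics, using __OP_ADD__ as the
// example (TUint64 stands in for __TYPE__; the other operations substitute
// swap, AND, OR or XOR for the addition):
#if 0	// illustrative sketch, not compiled
static TUint64 SketchAdd64(volatile TUint64* a, TUint64 v)
	{
	TUint64 orig = *a;	// loaded with interrupts masked in the real routine
	*a = orig + v;
	return orig;		// every RMW1 operation returns the original value
	}
#endif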


#elif defined(__OP_CAS__)

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	asm("stmfd sp!, {r4-r7} ");
	asm("ldmia r1, {r4-r5} ");		// R5:R4 = expected value *q
	ENSURE_8BYTE_ALIGNMENT(0);
	__DISABLE_INTERRUPTS__(r12,r6);
	asm("ldmia r0, {r6-r7} ");
	asm("cmp r6, r4 ");
	asm("cmpeq r7, r5 ");
	asm("stmeqia r0, {r2-r3} ");	// equal: store v to *a
	__RESTORE_INTERRUPTS__(r12);	// flags preserved
	asm("stmneia r1, {r6-r7} ");	// not equal: copy the value found into *q
	asm("ldmfd sp!, {r4-r7} ");
	asm("moveq r0, #1 ");
	asm("movne r0, #0 ");
	__JUMP(,lr);
	}
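
// Semantics of the compare-and-store above: if *a equals the expected value at
// *q, v is written to *a and the function returns true; otherwise the value
// actually found is written back to *q and the function returns false. Hedged
// sketch (TUint64 standing in for __TYPE__):
#if 0	// illustrative sketch, not compiled
static TBool SketchCas64(volatile TUint64* a, TUint64* q, TUint64 v)
	{
	TUint64 orig = *a;	// loaded with interrupts masked in the real routine
	if (orig == *q)
		{
		*a = v;		// expected value matched: commit the new value
		return ETrue;
		}
	*q = orig;		// mismatch: report what was actually there
	return EFalse;
	}
#endif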


#elif defined(__OP_AXO__)
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __EABI__
	asm("mov r1, sp ");
	asm("stmfd sp!, {r4-r6} ");
	asm("mov r12, r0 ");
	asm("ldmia r1, {r4-r5} ");		// R5:R4 = v
#else
	// GCC98r2: u arrived in R2:R1 and v in [SP]:R3; re-marshal to R3:R2 and R5:R4
	asm("stmfd sp!, {r4-r6} ");
	asm("mov r4, r3 ");
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
	asm("mov r12, r0 ");
	asm("ldr r5, [sp, #12] ");
#endif
	ENSURE_8BYTE_ALIGNMENT(12);
	__DISABLE_INTERRUPTS__(r6,r0);
	asm("ldmia r12, {r0-r1} ");
	asm("and r2, r2, r0 ");
	asm("and r3, r3, r1 ");
	asm("eor r2, r2, r4 ");
	asm("eor r3, r3, r5 ");
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r6);	// flags preserved
	asm("ldmfd sp!, {r4-r6} ");
	__JUMP(,lr);
	}
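
// The and-xor (__OP_AXO__) operation computes (orig & u) ^ v as the new value
// and returns the original, which is exactly what the AND/EOR pairs above do on
// the two 32-bit halves. Hedged sketch (TUint64 standing in for __TYPE__):
#if 0	// illustrative sketch, not compiled
static TUint64 SketchAxo64(volatile TUint64* a, TUint64 u, TUint64 v)
	{
	TUint64 orig = *a;	// loaded with interrupts masked in the real routine
	*a = (orig & u) ^ v;
	return orig;
	}
#endif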


#elif defined(__OP_RMW3__)
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __EABI__
	asm("mov r1, sp ");
	asm("stmfd sp!, {r4-r8} ");
	asm("mov r12, r0 ");
	asm("ldmia r1, {r4-r7} ");		// R5:R4 = u, R7:R6 = v
#else
	// GCC98r2: t arrived in R2:R1, u in [SP]:R3, v in [SP+8,4]; re-marshal to R3:R2, R5:R4, R7:R6
	asm("mov r12, sp ");
	asm("stmfd sp!, {r4-r8} ");
	asm("mov r4, r3 ");
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
	asm("ldmia r12, {r5-r7} ");
	asm("mov r12, r0 ");
#endif
	ENSURE_8BYTE_ALIGNMENT(12);
	__DISABLE_INTERRUPTS__(r8,r0);
	asm("ldmia r12, {r0-r1} ");
	asm("subs r2, r0, r2 ");
	asm("sbcs r3, r1, r3 ");
#if	defined(__OP_TAU__)
	asm("movcc r4, r6 ");		// unsigned: original < t, so add v rather than u
	asm("movcc r5, r7 ");
#elif	defined(__OP_TAS__)
	asm("movlt r4, r6 ");		// signed: original < t, so add v rather than u
	asm("movlt r5, r7 ");
#endif
	asm("adds r2, r0, r4 ");
	asm("adcs r3, r1, r5 ");
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r8);	// flags preserved
	asm("ldmfd sp!, {r4-r8} ");
	__JUMP(,lr);
	}
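
// The RMW3 operations are the threshold-and-add forms: if the original value is
// at or above the threshold t the routine adds u, otherwise it adds v, and the
// original value is returned. __OP_TAU__ compares unsigned (MOVCC above) and
// __OP_TAS__ compares signed (MOVLT). Hedged sketch of the unsigned form
// (TUint64 standing in for __TYPE__):
#if 0	// illustrative sketch, not compiled
static TUint64 SketchTau64(volatile TUint64* a, TUint64 t, TUint64 u, TUint64 v)
	{
	TUint64 orig = *a;			// loaded with interrupts masked in the real routine
	*a = orig + (orig >= t ? u : v);	// the signed (tas) form uses a signed comparison
	return orig;
	}
#endif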

#endif

// Second inclusion undefines temporaries
#include "atomic_ops.h"