os/kernelhwsrv/kernel/eka/common/arm/atomic_64_v6_v5.h
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_64_v6_v5.h
// Kernel-side 64 bit atomic operations on V6 or V5 processors
// WARNING: GCC98r2 doesn't align registers so 'v' ends up in R2:R1 not R3:R2
//
//

#include "atomic_ops.h"

#ifdef __BARRIERS_NEEDED__
#error Barriers not supported on V6/V5, only V6K/V7
#endif

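// Note on the implementation strategy used throughout this file:
// V5 and plain V6 processors lack the LDREXD/STREXD doubleword exclusives
// (introduced in V6K), so each 64-bit read-modify-write below is made atomic
// by disabling interrupts around an ordinary LDMIA/STMIA pair. As the #error
// above indicates, these builds never require memory barriers, which is why
// the rel/rlx/ord/acq variants of each operation can share one body.
// atomic_ops.h selects the operation family via the __OP_*__ macros and
// supplies __TYPE__, __OPERATION__ and __fname__; the second inclusion at the
// bottom of the file undefines those temporaries again.
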
#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/)
	{
	// R0=a
	// return value in R1:R0
	ENSURE_8BYTE_ALIGNMENT(0);
	asm("ldmia r0, {r0-r1} ");
	__JUMP(,lr);
	}



#elif defined(__OP_STORE__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	// just fall through to __e32_atomic_store_ord64
	}
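
// Note: __NAKED__ functions get no compiler-generated prologue or epilogue,
// so the empty body above simply falls through into __e32_atomic_store_ord64
// below; with no barriers needed on these processors the release and ordered
// stores are identical. The same fall-through idiom is used for the
// read-modify-write operations later in this file.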

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	ENSURE_8BYTE_ALIGNMENT(0);
#ifdef __EABI__
	asm("stmia r0, {r2-r3} ");
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
#else
	asm("stmia r0, {r1-r2} ");
	asm("mov r0, r1 ");
	asm("mov r1, r2 ");
#endif
	__JUMP(,lr);
	}


#elif defined(__OP_RMW1__)

#if defined(__OP_SWP__)
#define	__DO_PROCESSING__
#elif defined(__OP_ADD__)
#define	__DO_PROCESSING__	asm("adds r2, r2, r0 ");	asm("adcs r3, r3, r1 ");
#elif defined(__OP_AND__)
#define	__DO_PROCESSING__	asm("and r2, r2, r0 ");		asm("and r3, r3, r1 ");
#elif defined(__OP_IOR__)
#define	__DO_PROCESSING__	asm("orr r2, r2, r0 ");		asm("orr r3, r3, r1 ");
#elif defined(__OP_XOR__)
#define	__DO_PROCESSING__	asm("eor r2, r2, r0 ");		asm("eor r3, r3, r1 ");
#endif
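
// __DO_PROCESSING__ combines the operand v (R3:R2) with the old value of the
// variable (R1:R0, loaded inside the acq body below) and leaves the new value
// in R3:R2 ready to be stored back. For __OP_SWP__ it is empty because v
// itself is written back unchanged.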

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
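	// Pseudo-C sketch of the sequence below (names illustrative only):
	//   irq = disable_interrupts();
	//   oldval = *a;
	//   *a = op(oldval, v);        // op per __DO_PROCESSING__: swp/add/and/orr/eor
	//   restore_interrupts(irq);
	//   return oldval;             // old value returned in R1:R0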

#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	asm("mov r12, r0 ");
	ENSURE_8BYTE_ALIGNMENT(12);
	asm("str r4, [sp, #-4]! ");
	__DISABLE_INTERRUPTS__(r4,r1);
	asm("ldmia r12, {r0-r1} ");
	__DO_PROCESSING__
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r4);
	asm("ldr r4, [sp], #4 ");
	__JUMP(,lr);
	}

#undef __DO_PROCESSING__


#elif defined(__OP_CAS__)

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	// just fall through to __e32_atomic_*_acq64
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
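	// Pseudo-C sketch of the compare-and-swap below (names illustrative only):
	//   if (*a == *q) { *a = v;  return 1; }
	//   else          { *q = *a; return 0; }
	// The compare and the conditional store are made atomic by the
	// interrupt-disable window.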
	asm("stmfd sp!, {r4-r7} ");
	asm("ldmia r1, {r4-r5} ");
	ENSURE_8BYTE_ALIGNMENT(0);
	__DISABLE_INTERRUPTS__(r12,r6);
	asm("ldmia r0, {r6-r7} ");
	asm("cmp r6, r4 ");
	asm("cmpeq r7, r5 ");
	asm("stmeqia r0, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r12);	// flags preserved
	asm("stmneia r1, {r6-r7} ");
	asm("ldmfd sp!, {r4-r7} ");
	asm("moveq r0, #1 ");
	asm("movne r0, #0 ");
	__JUMP(,lr);
	}


#elif defined(__OP_AXO__)
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
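	// Pseudo-C sketch of the and-xor operation below (names illustrative only):
	//   oldval = *a;
	//   *a = (oldval & u) ^ v;
	//   return oldval;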
#ifdef __EABI__
	asm("mov r1, sp ");
	asm("stmfd sp!, {r4-r6} ");
	asm("mov r12, r0 ");
	asm("ldmia r1, {r4-r5} ");
#else
	asm("stmfd sp!, {r4-r6} ");
	asm("mov r4, r3 ");
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
	asm("mov r12, r0 ");
	asm("ldr r5, [sp, #12] ");
#endif
	ENSURE_8BYTE_ALIGNMENT(12);
	__DISABLE_INTERRUPTS__(r6,r0);
	asm("ldmia r12, {r0-r1} ");
	asm("and r2, r2, r0 ");
	asm("and r3, r3, r1 ");
	asm("eor r2, r2, r4 ");
	asm("eor r3, r3, r5 ");
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r6);	// flags preserved
	asm("ldmfd sp!, {r4-r6} ");
	__JUMP(,lr);
	}


#elif defined(__OP_RMW3__)
#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	// just fall through to __e32_atomic_*_acq64
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
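	// Pseudo-C sketch of threshold-and-add below (names illustrative only):
	//   oldval = *a;
	//   *a = oldval + ((oldval >= t) ? u : v);
	//   return oldval;
	// __OP_TAU__ compares unsigned (movcc), __OP_TAS__ compares signed (movlt).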
#ifdef __EABI__
	asm("mov r1, sp ");
	asm("stmfd sp!, {r4-r8} ");
	asm("mov r12, r0 ");
	asm("ldmia r1, {r4-r7} ");
#else
	asm("mov r12, sp ");
	asm("stmfd sp!, {r4-r8} ");
	asm("mov r4, r3 ");
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
	asm("ldmia r12, {r5-r7} ");
	asm("mov r12, r0 ");
#endif
	ENSURE_8BYTE_ALIGNMENT(12);
	__DISABLE_INTERRUPTS__(r8,r0);
	asm("ldmia r12, {r0-r1} ");
	asm("subs r2, r0, r2 ");
	asm("sbcs r3, r1, r3 ");
#if	defined(__OP_TAU__)
	asm("movcc r4, r6 ");
	asm("movcc r5, r7 ");
#elif	defined(__OP_TAS__)
	asm("movlt r4, r6 ");
	asm("movlt r5, r7 ");
#endif
	asm("adds r2, r0, r4 ");
	asm("adcs r3, r1, r5 ");
	asm("stmia r12, {r2-r3} ");
	__RESTORE_INTERRUPTS__(r8);	// flags preserved
	asm("ldmfd sp!, {r4-r8} ");
	__JUMP(,lr);
	}

#endif

// Second inclusion undefines temporaries
#include "atomic_ops.h"
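
// Illustrative caller-side sketch (assumes the usual e32 atomic declarations,
// e.g. __e32_atomic_add_ord64 and __e32_atomic_cas_ord64, are in scope; the
// local names below are made up):
//
//   TUint64 count = 0;
//   __e32_atomic_add_ord64(&count, 1);   // atomic count += 1, returns old value
//
//   TUint64 expected = 1;
//   if (__e32_atomic_cas_ord64(&count, &expected, 5))
//       {
//       // count was 1 and has been replaced by 5
//       }
//   else
//       {
//       // expected now holds the value actually found in count
//       }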