os/kernelhwsrv/kernel/eka/common/arm/atomic_64_v6k.h
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_64_v6k.h
// 64 bit atomic operations on V6K processors
//
//

#include "atomic_ops.h"
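
// Note: this header is parameterised by macros (__OP_LOAD__, __OP_STORE__,
// __OP_RMW1__, __OP_CAS__, __OP_AXO__, __OP_RMW3__, __TYPE__, __fname__,
// LDREXD/STREXD and the barrier macros) which are set up by atomic_ops.h and
// the including source file, and it is included once per generated operation.
// Illustrative sketch only - the exact driver macros are defined elsewhere and
// the operation-selection shown here is an assumption, not part of this file:
//
//	#define __OPERATION__	add		// hypothetical: selects the fetch-and-add family
//	#include "atomic_64_v6k.h"		// would emit __e32_atomic_add_{rlx,acq,rel,ord}64
//	#undef __OPERATION__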

#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/)
	{
	// R0=a
	// return value in R1:R0

	asm("mov r2, r0 ");
	ENSURE_8BYTE_ALIGNMENT(2);
	asm("1: ");
	LDREXD(0,2);						// R1:R0 = oldv
	STREXD(3,0,2);						// try to write back, R3=0 if success
	asm("cmp r3, #0 ");
	asm("bne 1b ");						// failed - retry
	__LOCAL_DATA_MEMORY_BARRIER__(r3);
	__JUMP(,lr);
	}
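
// Illustrative sketch only (not part of the original source): a single LDRD is
// not architecturally guaranteed to be a single-copy atomic 64-bit access on
// these processors, so the load is performed with an exclusive pair and the
// observed value is written straight back. In C-like terms, assuming __TYPE__
// is a 64-bit integer and exclusive_store() is a stand-in for STREXD:
//
//	do	{
//		oldv = *a;				// LDREXD
//		} while (!exclusive_store(a, oldv));	// STREXD fails if *a was disturbed
//	barrier();				// __LOCAL_DATA_MEMORY_BARRIER__ gives acquire ordering
//	return oldv;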



#elif defined(__OP_STORE__)

#define	__DO_STORE__		\
	asm("mov r12, r0 ");	\
	asm("1: ");				\
	LDREXD(0,12);			\
	STREXD(1,2,12);			\
	asm("cmp r1, #0 ");		\
	asm("bne 1b ");
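
// Illustrative sketch only (not part of the original source): a plain STRD is
// likewise not guaranteed to be a single-copy atomic 64-bit write, so the
// store also goes through an exclusive load/store pair. Roughly, with
// exclusive_store() again standing in for STREXD:
//
//	do	{
//		(void)*a;				// LDREXD claims the exclusive monitor
//		} while (!exclusive_store(a, v));	// STREXD writes R3:R2, retries on failure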


extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
#ifdef __BARRIERS_NEEDED__				// If no barriers, just fall through to __e32_atomic_store_ord64
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	ENSURE_8BYTE_ALIGNMENT(0);
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
	__DO_STORE__
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	ENSURE_8BYTE_ALIGNMENT(0);
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
	__DO_STORE__
	__LOCAL_DATA_MEMORY_BARRIER__(r1);
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
	__JUMP(,lr);
	}
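
// When barriers are needed, the two store variants above differ only in
// barrier placement. Sketch only, in C-like terms:
//
//	store_rel64: barrier(); do_store(a, v); return v;			// release: barrier before the write
//	store_ord64: barrier(); do_store(a, v); barrier(); return v;		// fully ordered: barriers both sides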

#undef __DO_STORE__


#elif defined(__OP_RMW1__)

#ifdef __OP_SWP__
#define	__SAVE_REGS__		asm("str r6, [sp, #-4]! ");
#define	__RESTORE_REGS__	asm("ldr r6, [sp], #4 ");
#define	__SOURCE_REG__		2
#define	__DO_PROCESSING__
#else
#define	__SAVE_REGS__		asm("stmfd sp!, {r4-r6} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r6} ");
#define	__SOURCE_REG__		4
#if defined(__OP_ADD__)
#define	__DO_PROCESSING__	asm("adds r4, r0, r2 ");	asm("adcs r5, r1, r3 ");
#elif defined(__OP_AND__)
#define	__DO_PROCESSING__	asm("and r4, r0, r2 ");		asm("and r5, r1, r3 ");
#elif defined(__OP_IOR__)
#define	__DO_PROCESSING__	asm("orr r4, r0, r2 ");		asm("orr r5, r1, r3 ");
#elif defined(__OP_XOR__)
#define	__DO_PROCESSING__	asm("eor r4, r0, r2 ");		asm("eor r5, r1, r3 ");
#endif
#endif

#define __DO_RMW1_OP__				\
	asm("mov r12, r0 ");			\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(0,12);					\
	__DO_PROCESSING__				\
	STREXD(6,__SOURCE_REG__,12);	\
	asm("cmp r6, #0 ");				\
	asm("bne 1b ");
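
// Illustrative sketch only (not part of the original source): this is a
// fetch-and-op loop. The value stored comes from R3:R2 for swap, or from
// R5:R4 (the result computed by __DO_PROCESSING__) for add/and/or/xor, and
// the old value is returned in R1:R0. In C-like terms:
//
//	do	{
//		oldv = *a;				// LDREXD
//		newv = op(oldv, v);			// __DO_PROCESSING__ (or just v for swap)
//		} while (!exclusive_store(a, newv));	// STREXD, retry on failure
//	return oldv;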


extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the release barrier (if any) then fall through to the rlx variant below
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	__SAVE_REGS__
	__DO_RMW1_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the leading barrier (if any) then fall through to the acq variant below
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	__SAVE_REGS__
	__DO_RMW1_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r6);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_RMW1_OP__
#undef __SOURCE_REG__
#undef __DO_PROCESSING__


#elif defined(__OP_CAS__)

#define	__SAVE_REGS__		asm("stmfd sp!, {r4-r7} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r7} ");

#define __DO_CAS_OP__				\
	asm("ldmia r1, {r6-r7} ");		\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(4,0);					\
	asm("cmp r4, r6 ");				\
	asm("cmpeq r5, r7 ");			\
	asm("bne 2f ");					\
	STREXD(12,2,0);					\
	asm("cmp r12, #0 ");			\
	asm("bne 1b ");					\
	asm("2: ");						\
	asm("stmneia r1, {r4-r5} ");	\
	asm("movne r0, #0 ");			\
	asm("moveq r0, #1 ");
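
// Illustrative sketch only (not part of the original source): this is the
// usual compare-and-swap contract. R1 points at the expected value q and
// R3:R2 holds the new value v. In C-like terms:
//
//	oldv = *a;					// LDREXD into R5:R4
//	if (oldv != *q)
//		{
//		*q = oldv;				// report the value actually observed
//		return FALSE;
//		}
//	if (!exclusive_store(a, v))			// STREXD, lost the exclusive reservation
//		goto retry;
//	return TRUE;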


extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the release barrier (if any) then fall through to the rlx variant below
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_CAS_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the leading barrier (if any) then fall through to the acq variant below
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	__SAVE_REGS__
	__DO_CAS_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r12);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_CAS_OP__



#elif defined(__OP_AXO__)

#ifdef __EABI__
#define	__SAVE_REGS__		asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r8} ");
#else
#define	__SAVE_REGS__		asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r8,r12} ");
#endif

#define __DO_AXO_OP__				\
	asm("ldmia r1, {r4-r5} ");		\
	asm("mov r12, r0 ");			\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(0,12);					\
	asm("and r6, r0, r2 ");			\
	asm("and r7, r1, r3 ");			\
	asm("eor r6, r6, r4 ");			\
	asm("eor r7, r7, r5 ");			\
	STREXD(8,6,12);					\
	asm("cmp r8, #0 ");				\
	asm("bne 1b ");
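
// Illustrative sketch only (not part of the original source): R3:R2 holds u
// and R1 points at v on the caller's stack (the non-EABI __SAVE_REGS__ first
// pushes R3 so that v is contiguous in memory before taking the pointer).
// In C-like terms:
//
//	do	{
//		oldv = *a;				// LDREXD
//		newv = (oldv & u) ^ v;			// AND then XOR
//		} while (!exclusive_store(a, newv));	// STREXD, retry on failure
//	return oldv;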


#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the release barrier (if any) then fall through to the rlx variant below
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_AXO_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the leading barrier (if any) then fall through to the acq variant below
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	__SAVE_REGS__
	__DO_AXO_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r8);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_AXO_OP__


#elif defined(__OP_RMW3__)

#ifdef __EABI__
#define	__SAVE_REGS__		asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r10} ");
#else
#define	__SAVE_REGS__		asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r10,r12} ");
#endif

#if	defined(__OP_TAU__)
#define	__COND_GE__		"cs"		// unsigned comparison: carry set means no borrow, i.e. oldv >= t
#define	__COND_LT__		"cc"
#elif	defined(__OP_TAS__)
#define	__COND_GE__		"ge"		// signed comparison
#define	__COND_LT__		"lt"
#endif

#define __DO_RMW3_OP__				\
	asm("ldmia r1, {r4-r7} ");		\
	asm("mov r12, r0 ");			\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(0,12);					\
	asm("subs r8, r0, r2 ");		\
	asm("sbcs r9, r1, r3 ");		\
	asm("mov" __COND_GE__ " r8, r4 ");	\
	asm("mov" __COND_GE__ " r9, r5 ");	\
	asm("mov" __COND_LT__ " r8, r6 ");	\
	asm("mov" __COND_LT__ " r9, r7 ");	\
	asm("adds r8, r8, r0 ");		\
	asm("adcs r9, r9, r1 ");		\
	STREXD(10,8,12);				\
	asm("cmp r10, #0 ");			\
	asm("bne 1b ");
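
// Illustrative sketch only (not part of the original source): R3:R2 holds the
// threshold t, and R1 points at u and v on the caller's stack. The 64-bit
// comparison is unsigned for TAU and signed for TAS. In C-like terms:
//
//	do	{
//		oldv = *a;				// LDREXD
//		newv = oldv + ((oldv >= t) ? u : v);	// SUBS/SBCS compare, ADDS/ADCS add
//		} while (!exclusive_store(a, newv));	// STREXD, retry on failure
//	return oldv;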


#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the release barrier (if any) then fall through to the rlx variant below
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_RMW3_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	// issue the leading barrier (if any) then fall through to the acq variant below
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	__SAVE_REGS__
	__DO_RMW3_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r10);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_RMW3_OP__
#undef __COND_GE__
#undef __COND_LT__


#endif

// Second inclusion undefines temporaries
#include "atomic_ops.h"