// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_64_v6k.h
// 64 bit atomic operations on V6K processors
//
//

#include "atomic_ops.h"

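// Note on structure: exactly one of __OP_LOAD__, __OP_STORE__, __OP_RMW1__,
// __OP_CAS__, __OP_AXO__ or __OP_RMW3__ is expected to be defined for each
// inclusion of this file (presumably by the translation unit that includes
// it), while atomic_ops.h - included above and again at the end - supplies
// and later undefines helpers such as __TYPE__, __fname__ and the barrier
// macros. Every operation below is built on the ARMv6K LDREXD/STREXD
// exclusive pair; as an illustrative (non-compiled) sketch of the common
// retry loop:
//
//	do	{
//		oldv = LDREXD(a);				// 64-bit load, sets the exclusive monitor
//		newv = f(oldv, operands);		// operation-specific processing
//		} while (STREXD(newv, a) != 0);	// store fails if the monitor was lost
//
// i.e. the update is retried until the load/modify/store sequence completes
// without interference from another writer to the same location.
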
#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_load_acq64(const volatile TAny* /*a*/)
	{
	// R0=a
	// return value in R1:R0

	asm("mov r2, r0 ");
	ENSURE_8BYTE_ALIGNMENT(2);
	asm("1: ");
	LDREXD(0,2);						// R1:R0 = oldv
	STREXD(3,0,2);						// try to write back, R3=0 if success
	asm("cmp r3, #0 ");
	asm("bne 1b ");						// failed - retry
	__LOCAL_DATA_MEMORY_BARRIER__(r3);
	__JUMP(,lr);
	}
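
// Note: even this pure load goes through the LDREXD/STREXD pair and writes
// the value it has just read straight back. A successful STREXD appears to
// be used here as confirmation that the 64-bit value was observed atomically
// (no other write intervened between the exclusive load and store); the
// barrier that follows then provides the acquire ordering.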



#elif defined(__OP_STORE__)

#define	__DO_STORE__		\
	asm("mov r12, r0 ");	\
	asm("1: ");				\
	LDREXD(0,12);			\
	STREXD(1,2,12);			\
	asm("cmp r1, #0 ");		\
	asm("bne 1b ");


extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_rel64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
#ifdef __BARRIERS_NEEDED__				// If no barriers, just fall through to __e32_atomic_store_ord64
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
	ENSURE_8BYTE_ALIGNMENT(0);
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
	__DO_STORE__
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __e32_atomic_store_ord64(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0 equal to v
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	ENSURE_8BYTE_ALIGNMENT(0);
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
	__DO_STORE__
	__LOCAL_DATA_MEMORY_BARRIER__(r1);
	asm("mov r0, r2 ");
	asm("mov r1, r3 ");
	__JUMP(,lr);
	}
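
// Note: the two store variants differ only in barrier placement -
// __e32_atomic_store_rel64 issues a barrier before the store (release
// semantics), while __e32_atomic_store_ord64 adds a second barrier after it
// (fully ordered). When __BARRIERS_NEEDED__ is not defined the release
// variant has an empty body and, being __NAKED__, falls straight through
// into __e32_atomic_store_ord64 (the next function emitted), as its comment
// indicates. Both return v in R1:R0.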

#undef __DO_STORE__


#elif defined(__OP_RMW1__)

#ifdef __OP_SWP__
#define	__SAVE_REGS__		asm("str r6, [sp, #-4]! ");
#define	__RESTORE_REGS__	asm("ldr r6, [sp], #4 ");
#define	__SOURCE_REG__		2
#define	__DO_PROCESSING__
#else
#define	__SAVE_REGS__		asm("stmfd sp!, {r4-r6} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r6} ");
#define	__SOURCE_REG__		4
#if defined(__OP_ADD__)
#define	__DO_PROCESSING__	asm("adds r4, r0, r2 ");	asm("adcs r5, r1, r3 ");
#elif defined(__OP_AND__)
#define	__DO_PROCESSING__	asm("and r4, r0, r2 ");		asm("and r5, r1, r3 ");
#elif defined(__OP_IOR__)
#define	__DO_PROCESSING__	asm("orr r4, r0, r2 ");		asm("orr r5, r1, r3 ");
#elif defined(__OP_XOR__)
#define	__DO_PROCESSING__	asm("eor r4, r0, r2 ");		asm("eor r5, r1, r3 ");
#endif
#endif

#define __DO_RMW1_OP__				\
	asm("mov r12, r0 ");			\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(0,12);					\
	__DO_PROCESSING__				\
	STREXD(6,__SOURCE_REG__,12);	\
	asm("cmp r6, #0 ");				\
	asm("bne 1b ");


extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	__SAVE_REGS__
	__DO_RMW1_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R3:R2=v
	// return value in R1:R0
#ifndef __EABI__
	asm("mov r3, r2 ");
	asm("mov r2, r1 ");
#endif
	__SAVE_REGS__
	__DO_RMW1_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r6);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_RMW1_OP__
#undef __SOURCE_REG__
#undef __DO_PROCESSING__


#elif defined(__OP_CAS__)

#define	__SAVE_REGS__		asm("stmfd sp!, {r4-r7} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r7} ");

#define __DO_CAS_OP__				\
	asm("ldmia r1, {r6-r7} ");		\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(4,0);					\
	asm("cmp r4, r6 ");				\
	asm("cmpeq r5, r7 ");			\
	asm("bne 2f ");					\
	STREXD(12,2,0);					\
	asm("cmp r12, #0 ");			\
	asm("bne 1b ");					\
	asm("2: ");						\
	asm("stmneia r1, {r4-r5} ");	\
	asm("movne r0, #0 ");			\
	asm("moveq r0, #1 ");


extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_CAS_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R3:R2=v
	// return value in R0
	__SAVE_REGS__
	__DO_CAS_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r12);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_CAS_OP__



#elif defined(__OP_AXO__)

#ifdef __EABI__
#define	__SAVE_REGS__		asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r8} ");
#else
#define	__SAVE_REGS__		asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r8} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r8,r12} ");
#endif
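
// Note: the #ifndef __EABI__ variants above (and the similar r1/r2/r3
// shuffles elsewhere in this file) appear to compensate for the older,
// pre-EABI calling convention, under which 64-bit arguments are not passed
// in the register pairs and stack slots the EABI uses. The extra moves and
// the pushed r3 recreate the EABI layout - first 64-bit operand in R3:R2,
// any further operands reachable through R1 pointing into the stack - so the
// operation macros below can assume a single argument layout.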

#define __DO_AXO_OP__				\
	asm("ldmia r1, {r4-r5} ");		\
	asm("mov r12, r0 ");			\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(0,12);					\
	asm("and r6, r0, r2 ");			\
	asm("and r7, r1, r3 ");			\
	asm("eor r6, r6, r4 ");			\
	asm("eor r7, r7, r5 ");			\
	STREXD(8,6,12);					\
	asm("cmp r8, #0 ");				\
	asm("bne 1b ");


#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_AXO_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=u, [SP+4,0]=v
	// return value in R1:R0
	__SAVE_REGS__
	__DO_AXO_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r8);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_AXO_OP__


#elif defined(__OP_RMW3__)

#ifdef __EABI__
#define	__SAVE_REGS__		asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r10} ");
#else
#define	__SAVE_REGS__		asm("str r3, [sp, #-4]! "); asm("mov r3, r2 "); asm("mov r2, r1 "); asm("mov r1, sp "); asm("stmfd sp!, {r4-r10} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r10,r12} ");
#endif

#if	defined(__OP_TAU__)
#define	__COND_GE__		"cs"
#define	__COND_LT__		"cc"
#elif	defined(__OP_TAS__)
#define	__COND_GE__		"ge"
#define	__COND_LT__		"lt"
#endif

#define __DO_RMW3_OP__				\
	asm("ldmia r1, {r4-r7} ");		\
	asm("mov r12, r0 ");			\
	ENSURE_8BYTE_ALIGNMENT(0);		\
	asm("1: ");						\
	LDREXD(0,12);					\
	asm("subs r8, r0, r2 ");		\
	asm("sbcs r9, r1, r3 ");		\
	asm("mov" __COND_GE__ " r8, r4 ");	\
	asm("mov" __COND_GE__ " r9, r5 ");	\
	asm("mov" __COND_LT__ " r8, r6 ");	\
	asm("mov" __COND_LT__ " r9, r7 ");	\
	asm("adds r8, r8, r0 ");		\
	asm("adcs r9, r9, r1 ");		\
	STREXD(10,8,12);				\
	asm("cmp r10, #0 ");			\
	asm("bne 1b ");


#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_RMW3_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

#ifdef __EABI__
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
#else
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,64)(int,int,int,int)
#endif
	{
	// R0=a, R3:R2=t, [SP+4,0]=u, [SP+12,8]=v
	// return value in R1:R0
	__SAVE_REGS__
	__DO_RMW3_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r10);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_RMW3_OP__
#undef __COND_GE__
#undef __COND_LT__


#endif

// Second inclusion undefines temporaries
#include "atomic_ops.h"