// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\atomic_32_v6.h
// 32 bit atomic operations on V6 and V6K processors
// Also 8 and 16 bit atomic operations on V6K processors
// Also 8, 16 and 32 bit load/store on all processors
//
//

#include "atomic_ops.h"
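// Note: atomic_ops.h is expected to define __OPERATION__, __TYPE__,
// __DATA_SIZE__, __fname__ and the __LDR/STR/LDREX/STREX_INST__ macros used
// below according to whichever __OP_*__ selector is currently set, and the
// second inclusion at the bottom of this file undefines those temporaries
// again (a reading of the include structure, not restated from atomic_ops.h).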
#if defined(__OP_LOAD__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(const volatile TAny* /*a*/)
	{
	// R0=a
	// return value in R0

	__LDR_INST__( ," r0, [r0] ");
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r1);
	__JUMP(,lr);
	}
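// The barrier after the plain load gives it acquire semantics: no later
// memory access can be reordered ahead of the load. The _Z_ form of the
// barrier macro presumably zeroes its scratch register (r1 here) first,
// as the ARMv6 CP15 barrier operation expects a zero source register.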

#elif defined(__OP_STORE__)
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
#ifdef __BARRIERS_NEEDED__
	// R0=a, R1=v
	// return value in R0 equal to v
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
	__STR_INST__( ," r1, [r0] ");
	asm("mov r0, r1 ");
	__JUMP(,lr);
#endif
	}
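// When __BARRIERS_NEEDED__ is not defined the body above is empty, and
// because the function is __NAKED__ control falls straight through into
// the 'ord' variant below, so both names share one implementation (the
// "collapse to same function" comments later in this file describe the
// same trick).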
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=v
	// return value in R0 equal to v
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
	__STR_INST__( ," r1, [r0] ");
	__LOCAL_DATA_MEMORY_BARRIER__(r12);
	asm("mov r0, r1 ");
	__JUMP(,lr);
	}
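// A fully ordered store: the barrier before the STR keeps earlier accesses
// from moving past the store (release), and the barrier after it keeps
// later accesses from moving ahead of it, at the cost of two barriers.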

#elif defined(__OP_RMW1__)

#ifdef __OP_SWP__
#define	__SOURCE_REG__		1
#define	__DO_PROCESSING__
#else
#define	__SOURCE_REG__		2
#if defined(__OP_ADD__)
#define	__DO_PROCESSING__	asm("add r2, r0, r1 ");
#elif defined(__OP_AND__)
#define	__DO_PROCESSING__	asm("and r2, r0, r1 ");
#elif defined(__OP_IOR__)
#define	__DO_PROCESSING__	asm("orr r2, r0, r1 ");
#elif defined(__OP_XOR__)
#define	__DO_PROCESSING__	asm("eor r2, r0, r1 ");
#endif
#endif

#define __DO_RMW1_OP__				\
	asm("mov r12, r0 ");			\
	asm("1: ");						\
	__LDREX_INST__(0,12);			\
	__DO_PROCESSING__				\
	__STREX_INST__(3,__SOURCE_REG__,12);	\
	asm("cmp r3, #0 ");				\
	asm("bne 1b ");
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}
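// Note the layout: with barriers enabled, 'rel' above issues its barrier
// and falls through into the 'rlx' body below, and 'ord' likewise falls
// through into 'acq', so each ordering variant reuses the RMW loop of the
// function that follows it instead of duplicating it.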
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__DO_RMW1_OP__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=v
	// return value in R0
	__DO_RMW1_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r3);
	__JUMP(,lr);
	}
#undef __DO_RMW1_OP__
#undef __SOURCE_REG__
#undef __DO_PROCESSING__


#elif defined(__OP_CAS__)

#define __DO_CAS_OP__				\
	__LDR_INST__( ," r12, [r1] ");	\
	asm("1: ");						\
	__LDREX_INST__(3,0);			\
	asm("cmp r3, r12 ");			\
	asm("bne 2f ");					\
	__STREX_INST__(3,2,0);			\
	asm("cmp r3, #0 ");				\
	asm("bne 1b ");					\
	asm("2: ");						\
	__STR_INST__(ne, "r3, [r1] ");	\
	asm("movne r0, #0 ");			\
	asm("moveq r0, #1 ");
extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__DO_CAS_OP__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ TBool __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ * /*q*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=q, R2=v
	// return value in R0
	__DO_CAS_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r3);
	__JUMP(,lr);
	}
#undef __DO_CAS_OP__



#elif defined(__OP_AXO__)

#define	__SAVE_REGS__		asm("str r4, [sp, #-4]! ");
#define	__RESTORE_REGS__	asm("ldr r4, [sp], #4 ");

#define __DO_AXO_OP__				\
	asm("mov r12, r0 ");			\
	asm("1: ");						\
	__LDREX_INST__(0,12);			\
	asm("and r4, r0, r1 ");			\
	asm("eor r4, r4, r2 ");			\
	__STREX_INST__(3,4,12);			\
	asm("cmp r3, #0 ");				\
	asm("bne 1b ");
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=u, R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=u, R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_AXO_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=u, R2=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=u, R2=v
	// return value in R0
	__SAVE_REGS__
	__DO_AXO_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r3);
	__RESTORE_REGS__
	__JUMP(,lr);
	}
#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_AXO_OP__


#elif defined(__OP_RMW3__)

#define	__SAVE_REGS__		asm("stmfd sp!, {r4-r5} ");
#define	__RESTORE_REGS__	asm("ldmfd sp!, {r4-r5} ");

#if	defined(__OP_TAU__)
#define	__COND_GE__		"cs"
#define	__COND_LT__		"cc"
#define	__DO_SIGN_EXTEND__
#elif	defined(__OP_TAS__)
#define	__COND_GE__		"ge"
#define	__COND_LT__		"lt"
#define	__DO_SIGN_EXTEND__	__SIGN_EXTEND__(r0)
#endif
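// The threshold comparison differs only in signedness: TAU uses the
// unsigned conditions ("cs"/"cc") and TAS the signed ones ("ge"/"lt"),
// with TAS also sign-extending sub-word operands via __SIGN_EXTEND__.
// The strings are pasted straight into the ADD mnemonics in the macro
// below to predicate the two additions.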
#define __DO_RMW3_OP__				\
	asm("mov r12, r0 ");			\
	asm("1: ");						\
	__LDREX_INST__(0,12);			\
	__DO_SIGN_EXTEND__				\
	asm("cmp r0, r1 ");				\
	asm("add" __COND_GE__ " r4, r0, r2 ");	\
	asm("add" __COND_LT__ " r4, r0, r3 ");	\
	__STREX_INST__(5,4,12);			\
	asm("cmp r5, #0 ");				\
	asm("bne 1b ");
extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rel,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=t, R2=u, R3=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,rlx,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=t, R2=u, R3=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__SAVE_REGS__
	__DO_RMW3_OP__
	__RESTORE_REGS__
	__JUMP(,lr);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,ord,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=t, R2=u, R3=v
	// return value in R0
#ifdef __BARRIERS_NEEDED__				// If no barriers, all ordering variants collapse to same function
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r12);
#endif
	}

extern "C" EXPORT_C __NAKED__ __TYPE__ __fname__(__OPERATION__,acq,__DATA_SIZE__)(volatile TAny* /*a*/, __TYPE__ /*t*/, __TYPE__ /*u*/, __TYPE__ /*v*/)
	{
	// R0=a, R1=t, R2=u, R3=v
	// return value in R0
	__SAVE_REGS__
	__DO_RMW3_OP__
	__LOCAL_DATA_MEMORY_BARRIER__(r5);
	__RESTORE_REGS__
	__JUMP(,lr);
	}

#undef __SAVE_REGS__
#undef __RESTORE_REGS__
#undef __DO_RMW3_OP__
#undef __COND_GE__
#undef __COND_LT__
#undef __DO_SIGN_EXTEND__


#endif

// Second inclusion undefines temporaries
#include "atomic_ops.h"