os/kernelhwsrv/kernel/eka/common/arm/atomics.cia
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\common\arm\atomics.cia
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
sl@0
    19
#include <cpudefs.h>
sl@0
    20
#include <e32def.h>
sl@0
    21
//#include <e32atomics.h>
sl@0
    22
sl@0
    23
#if defined(__KERNEL_MODE__)
sl@0
    24
#include "nk_cpu.h"
sl@0
    25
#elif defined(__ATOMIC_USE_FAST_EXEC__) || defined(__ATOMIC64_USE_FAST_EXEC__) || defined(__ATOMIC64_USE_SLOW_EXEC__)
sl@0
    26
#include <u32exec.h>
sl@0
    27
#endif
sl@0
    28
sl@0
    29
// Token-pasting helpers used to construct the exported atomic function
// names, e.g. __fname__(load,acq,32) expands to __e32_atomic_load_acq32.
#define	__concat__(a,b)	a##b
#define	__concat3__(a,b,c)	a##b##c
#define	__concat5__(a,b,c,d,e)	a##b##c##d##e
#define	__fname__(type,order,size)	__concat5__(__e32_atomic_,type,_,order,size)
//	__e32_atomic_##type##_##order##size
sl@0
    34
sl@0
    35
// Configuration: decide whether real hardware barriers are required
// (__BARRIERS_NEEDED__, SMP only) and whether LDREX-based implementations
// must be avoided (__AVOID_READ_SIDE_EFFECTS__).
#undef	__BARRIERS_NEEDED__
#undef	__AVOID_READ_SIDE_EFFECTS__
#ifdef __SMP__
#define	__BARRIERS_NEEDED__
#else
#ifdef __KERNEL_MODE__
// On non-SMP use interrupt disabling even on V6 and V6K just in case someone
// has used the atomic operations on I/O addresses.
#define __AVOID_READ_SIDE_EFFECTS__
#endif
#endif

// __LOCAL_* barrier macros expand to the real barrier macros on SMP builds
// and to nothing on unicore builds, where the barriers would only cost
// cycles without being needed for correctness.
#ifdef	__BARRIERS_NEEDED__
#define	__LOCAL_DATA_MEMORY_BARRIER__(reg)		__DATA_MEMORY_BARRIER__(reg)
#define	__LOCAL_DATA_MEMORY_BARRIER_Z__(reg)	__DATA_MEMORY_BARRIER_Z__(reg)
#define	__LOCAL_DATA_SYNC_BARRIER__(reg)		__DATA_SYNC_BARRIER__(reg)
#define	__LOCAL_DATA_SYNC_BARRIER_Z__(reg)		__DATA_SYNC_BARRIER_Z__(reg)
#define	__LOCAL_INST_SYNC_BARRIER__(reg)		__INST_SYNC_BARRIER__(reg)
#define	__LOCAL_INST_SYNC_BARRIER_Z__(reg)		__INST_SYNC_BARRIER_Z__(reg)
#else	// __BARRIERS_NEEDED__
#define	__LOCAL_DATA_MEMORY_BARRIER__(reg)
#define	__LOCAL_DATA_MEMORY_BARRIER_Z__(reg)
#define	__LOCAL_DATA_SYNC_BARRIER__(reg)
#define	__LOCAL_DATA_SYNC_BARRIER_Z__(reg)
#define	__LOCAL_INST_SYNC_BARRIER__(reg)
#define	__LOCAL_INST_SYNC_BARRIER_Z__(reg)
#endif	// __BARRIERS_NEEDED__
sl@0
    62
sl@0
    63
// Interrupt disable/restore pair used by the interrupt-masking atomic
// implementations. 'keep' receives the pre-disable CPSR so the previous
// interrupt state can be restored exactly.
#ifdef	__CPU_ARM_HAS_CPS
// CPS available: disable IRQ+FIQ+imprecise aborts in one instruction,
// no scratch register needed for the write-back.
#define	__DISABLE_INTERRUPTS__(keep,temp)	asm("mrs "#keep ", cpsr"); CPSIDAIF
#define	__RESTORE_INTERRUPTS__(keep)		asm("msr cpsr_c, "#keep )	// flags preserved
#else
// No CPS: read-modify-write CPSR, setting the I and F bits (0xc0) via 'temp'.
#define	__DISABLE_INTERRUPTS__(keep,temp)	asm("mrs "#keep ", cpsr"); asm("orr "#temp ", "#keep ", #0xc0" ); asm("msr cpsr, "#temp )
#define	__RESTORE_INTERRUPTS__(keep)		asm("msr cpsr_c, "#keep )	// flags preserved
#endif
sl@0
    70
sl@0
    71
/******************************************************************************
sl@0
    72
 * Barriers
sl@0
    73
 ******************************************************************************/
sl@0
    74
sl@0
    75
/** Barrier guaranteeing ordering of memory accesses

	Expands to a data memory barrier on SMP builds and to nothing on
	unicore builds (see __LOCAL_DATA_MEMORY_BARRIER_Z__ above).
	r0 is used as scratch by the _Z_ barrier variant.
*/
extern "C" EXPORT_C __NAKED__ void __e32_memory_barrier()
	{
	__LOCAL_DATA_MEMORY_BARRIER_Z__(r0);
	__JUMP(,lr);
	}
sl@0
    80
sl@0
    81
/** Barrier guaranteeing completion as well as ordering

	Kernel side, or on CPUs that permit user-mode barrier instructions,
	a data synchronisation barrier is issued directly (r0 used as scratch).
	Otherwise fall back to the EExecIMBRange slow exec call.
	NOTE(review): the fallback relies on the exec call round-trip providing
	the required synchronisation — confirm against the exec handler.
*/
#if defined(__KERNEL_MODE__) || defined(__CPU_ARM_SUPPORTS_USER_MODE_BARRIERS)
extern "C" EXPORT_C __NAKED__ void __e32_io_completion_barrier()
	{
	__DATA_SYNC_BARRIER_Z__(r0);
	__JUMP(,lr);
	}
#else
extern "C" EXPORT_C __NAKED__ void __e32_io_completion_barrier()
	{
	asm("mov	r0, sp ");					// dummy range: [sp, sp+0)
	asm("mov	r1, #0 ");
	SLOW_EXEC2(EExecIMBRange);				// exec call does not return here
	}
#endif
sl@0
    98
sl@0
    99
sl@0
   100
/******************************************************************************
sl@0
   101
 * Miscellaneous utility functions
sl@0
   102
 ******************************************************************************/
sl@0
   103
sl@0
   104
/** Find the most significant 1 in a 32 bit word

	@param	v	The word to be scanned
	@return		The bit number of the most significant 1 if v != 0
				-1 if v == 0
*/
extern "C" EXPORT_C __NAKED__ TInt __e32_find_ms1_32(TUint32 /*v*/)
	{
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(		1,0);						// r1=31-MSB(r0), 32 if r0=0
	asm("rsb	r0, r1, #31 ");				// r0=MSB(r0), -1 if r0=0
#else
	// No CLZ: binary search in 5 steps. Start with answer 31; whenever the
	// value fits entirely below a power-of-two boundary, shift it up and
	// reduce the answer by the corresponding amount.
	asm("movs	r1, r0 ");
	asm("beq	0f ");						// zero input handled separately
	asm("mov	r0, #31 ");
	asm("cmp	r1, #0x00010000 ");			// MSB below bit 16?
	asm("movcc	r1, r1, lsl #16 ");
	asm("subcc	r0, r0, #16 ");
	asm("cmp	r1, #0x01000000 ");			// MSB below bit 24 of shifted value?
	asm("movcc	r1, r1, lsl #8 ");
	asm("subcc	r0, r0, #8 ");
	asm("cmp	r1, #0x10000000 ");
	asm("movcc	r1, r1, lsl #4 ");
	asm("subcc	r0, r0, #4 ");
	asm("cmp	r1, #0x40000000 ");
	asm("movcc	r1, r1, lsl #2 ");
	asm("subcc	r0, r0, #2 ");
	asm("cmp	r1, #0x80000000 ");
	asm("subcc	r0, r0, #1 ");
	__JUMP(,	lr);
	asm("0: ");
	asm("mvn	r0, #0 ");					// if input zero, return -1
#endif
	__JUMP(,	lr);
	}
sl@0
   139
sl@0
   140
sl@0
   141
/** Find the least significant 1 in a 32 bit word

	@param	v	The word to be scanned
	@return		The bit number of the least significant 1 if v != 0
				-1 if v == 0
*/
extern "C" EXPORT_C __NAKED__ TInt __e32_find_ls1_32(TUint32 /*v*/)
	{
#ifdef __CPU_ARM_HAS_CLZ
	// arg ^ (arg-1) keeps the lowest set bit and everything below it,
	// so its MSB position is the LS1 position of the original value.
	asm("subs	r1, r0, #1 ");				// r1 = arg - 1 (C clear iff arg==0)
	asm("eorcs	r0, r0, r1 ");				// if arg=0, leave alone else mask upper bits
	CLZ(		1,0);						// r1=31-MSB(r0), 32 if r0=0
	asm("rsb	r0, r1, #31 ");				// r0=MSB(r0), -1 if r0=0
#else
	// No CLZ: additive binary search. Each step shifts the value left;
	// a zero result means the low half is empty, so the LS1 is at least
	// that many bits up and the count is increased.
	asm("movs	r1, r0 ");
	asm("beq	0f ");						// zero input handled separately
	asm("mov	r0, #0 ");
	asm("movs	r2, r1, lsl #16 ");			// low 16 bits all zero?
	asm("movne	r1, r2 ");					// no - continue with them
	asm("addeq	r0, r0, #16 ");				// yes - LS1 is 16 or above
	asm("movs	r2, r1, lsl #8 ");
	asm("movne	r1, r2 ");
	asm("addeq	r0, r0, #8 ");
	asm("movs	r2, r1, lsl #4 ");
	asm("movne	r1, r2 ");
	asm("addeq	r0, r0, #4 ");
	asm("movs	r2, r1, lsl #2 ");
	asm("movne	r1, r2 ");
	asm("addeq	r0, r0, #2 ");
	asm("movs	r2, r1, lsl #1 ");
	asm("addeq	r0, r0, #1 ");
	__JUMP(,	lr);
	asm("0: ");
	asm("mvn	r0, #0 ");					// if input zero, return -1
#endif
	__JUMP(,	lr);
	}
sl@0
   178
sl@0
   179
sl@0
   180
/** Count the number of 1's in a 32 bit word

	Parallel (SWAR) popcount: sums adjacent 1-bit fields into 2-bit fields,
	then 4-bit, 8-bit and 16-bit fields. The masks 0x0f0f0f0f, 0x33333333
	and 0x55555555 are built on the fly to avoid literal pool loads.

	@param	v	The word to be scanned
	@return		The number of 1's
*/
extern "C" EXPORT_C __NAKED__ TInt __e32_bit_count_32(TUint32 /*v*/)
	{
	asm("mov	r2, #0x0f ");				// r2=0x0000000f
	asm("orr	r2, r2, r2, lsl #8 ");		// r2=0x00000f0f
	asm("orr	r2, r2, r2, lsl #16 ");		// r2=0x0f0f0f0f
	asm("eor	r3, r2, r2, lsl #2 ");		// r3=0x33333333
	asm("eor	ip, r3, r3, lsl #1 ");		// ip=0x55555555
	asm("bic	r1, r0, ip ");				// r1=odd bits of input
	asm("and	r0, r0, ip ");				// r0=even bits of input
	asm("add	r0, r0, r1, lsr #1 ");		// r0[2n:2n+1] = in[2n]+in[2n+1], 0<=n<=15
	asm("bic	r1, r0, r3 ");				// r1 = r0[4n+2:4n+3] for 0<=n<=7, other bits 0
	asm("and	r0, r0, r3 ");				// r0 = r0[4n:4n+1] for 0<=n<=7, other bits 0
	asm("add	r0, r0, r1, lsr #2 ");		// r0 bits 4n:4n+3 = in[4n]+in[4n+1]+in[4n+2]+in[4n+3], 0<=n<=7
	asm("add	r0, r0, r0, lsr #4 ");		// r0[8n:8n+3]=in[8n]+in[8n+1]+...+in[8n+7], 0<=n<=3
	asm("and	r0, r0, r2 ");				// make sure other bits of r0 are zero
	asm("add	r0, r0, r0, lsr #8 ");		// r0[16n:16n+7]=in[16n]+in[16n+1]+...+in[16n+15], n=0,1
	asm("add	r0, r0, r0, lsr #16 ");		// r0[0:7]=SUM{ in[n] : 0<=n<=31 }
	asm("and	r0, r0, #0xff ");			// mask other unwanted bits
	__JUMP(,	lr);
	}
sl@0
   205
sl@0
   206
sl@0
   207
/** Find the most significant 1 in a 64 bit word

	@param	v	The word to be scanned
	@return		The bit number of the most significant 1 if v != 0
				-1 if v == 0
*/
extern "C" EXPORT_C __NAKED__ TInt __e32_find_ms1_64(TUint64 /*v*/)
	{
	/* On entry argument in R1:R0 */
#ifdef __CPU_ARM_HAS_CLZ
	CLZ(		2,1);						// r2=31-MSB(r1), 32 if r1=0
	asm("subs	r2, r2, #32 ");				// r2=-1-MSB(r1), 0 if r1=0
	CLZcc(CC_EQ,2,0);						// if r1=0, r2=31-MSB(r0), 32 if r0=0
	asm("rsb	r0, r2, #31 ");				// if r1!=0, r0=32+MSB(r1) else if r0!=0 r0=MSB(r0) else r0=-1
#else
	// No CLZ: reduce to a 32 bit scan of the word containing the MS1
	// (value in r1, base answer in r0), then binary-search downwards
	// exactly as in __e32_find_ms1_32.
	asm("cmp	r1, #1 ");					// r1>=1 ?
	asm("movcs	r0, #63 ");					// if so scan MSW with base answer 63
	asm("movccs	r1, r0 ");					// else r1=r0, test for zero (C unaffected)
	asm("beq	0f ");						// both words zero -> return -1
	asm("movcc	r0, #31 ");					// if r1=0 and r0!=0, r1=original r0 and r0=31
	asm("cmp	r1, #0x00010000 ");
	asm("movcc	r1, r1, lsl #16 ");
	asm("subcc	r0, r0, #16 ");
	asm("cmp	r1, #0x01000000 ");
	asm("movcc	r1, r1, lsl #8 ");
	asm("subcc	r0, r0, #8 ");
	asm("cmp	r1, #0x10000000 ");
	asm("movcc	r1, r1, lsl #4 ");
	asm("subcc	r0, r0, #4 ");
	asm("cmp	r1, #0x40000000 ");
	asm("movcc	r1, r1, lsl #2 ");
	asm("subcc	r0, r0, #2 ");
	asm("cmp	r1, #0x80000000 ");
	asm("subcc	r0, r0, #1 ");
	__JUMP(,	lr);
	asm("0: ");
	asm("mvn	r0, #0 ");					// if input zero, return -1
#endif
	__JUMP(,	lr);
	}
sl@0
   247
sl@0
   248
sl@0
   249
/** Find the least significant 1 in a 64 bit word
sl@0
   250
sl@0
   251
	@param	v	The word to be scanned
sl@0
   252
	@return		The bit number of the least significant 1 if v != 0
sl@0
   253
				-1 if v == 0
sl@0
   254
*/
sl@0
   255
extern "C" EXPORT_C __NAKED__ TInt __e32_find_ls1_64(TUint64 /*v*/)
sl@0
   256
	{
sl@0
   257
	/* On entry argument in R1:R0 */
sl@0
   258
#ifdef __CPU_ARM_HAS_CLZ
sl@0
   259
	asm("subs	r2, r0, #1 ");
sl@0
   260
	asm("sbcs	r3, r1, #0 ");				// r3:r2 = arg - 1
sl@0
   261
	asm("eorcs	r0, r0, r2 ");				// if arg=0 leave alone else mask upper bits
sl@0
   262
	asm("eorcs	r1, r1, r3 ");
sl@0
   263
	CLZ(		2,1);						// r2=31-MSB(r1), 32 if r1=0
sl@0
   264
	asm("subs	r2, r2, #32 ");				// r2=-1-MSB(r1), 0 if r1=0
sl@0
   265
	CLZcc(CC_EQ,2,0);						// if r1=0, r2=31-MSB(r0), 32 if r0=0
sl@0
   266
	asm("rsb	r0, r2, #31 ");				// if r1!=0, r0=32+MSB(r1) else if r0!=0 r0=MSB(r0) else r0=-1
sl@0
   267
#else
sl@0
   268
	asm("cmp	r0, #1 ");					// LSW(arg) >= 1?
sl@0
   269
	asm("movcs	r1, r0 ");					// if so r1=r0
sl@0
   270
	asm("movcs	r0, #32 ");					// and r0=32
sl@0
   271
	asm("movcc	r0, #0 ");					// else r0=0
sl@0
   272
	asm("cmpcc	r1, #1 ");					// and test if MSW(arg) >= 1
sl@0
   273
	asm("bcc	0f ");						// if not, return -1
sl@0
   274
	asm("movs	r2, r1, lsl #16 ");
sl@0
   275
	asm("movne	r1, r2 ");
sl@0
   276
	asm("addeq	r0, r0, #16 ");
sl@0
   277
	asm("movs	r2, r1, lsl #8 ");
sl@0
   278
	asm("movne	r1, r2 ");
sl@0
   279
	asm("addeq	r0, r0, #8 ");
sl@0
   280
	asm("movs	r2, r1, lsl #4 ");
sl@0
   281
	asm("movne	r1, r2 ");
sl@0
   282
	asm("addeq	r0, r0, #4 ");
sl@0
   283
	asm("movs	r2, r1, lsl #2 ");
sl@0
   284
	asm("movne	r1, r2 ");
sl@0
   285
	asm("addeq	r0, r0, #2 ");
sl@0
   286
	asm("movs	r2, r1, lsl #1 ");
sl@0
   287
	asm("addeq	r0, r0, #1 ");
sl@0
   288
	__JUMP(,	lr);
sl@0
   289
	asm("0: ");
sl@0
   290
	asm("mvn	r0, #0 ");					// if input zero, return -1
sl@0
   291
#endif
sl@0
   292
	__JUMP(,	lr);
sl@0
   293
	}
sl@0
   294
sl@0
   295
sl@0
   296
/** Count the number of 1's in a 64 bit word

	SWAR popcount as in __e32_bit_count_32, run on both halves to the 4-bit
	stage, then the halves are added and folded together. r4 is spilled to
	the stack because a fourth scratch register is needed for the masks.

	@param	v	The word to be scanned
	@return		The number of 1's
*/
extern "C" EXPORT_C __NAKED__ TInt __e32_bit_count_64(TUint64 /*v*/)
	{
	/* On entry argument in R1:R0 */
	asm("str	r4, [sp, #-4]! ");			// preserve r4
	asm("mov	r2, #0x0f ");				// r2=0x0000000f
	asm("orr	r2, r2, r2, lsl #8 ");		// r2=0x00000f0f
	asm("orr	r2, r2, r2, lsl #16 ");		// r2=0x0f0f0f0f
	asm("eor	r3, r2, r2, lsl #2 ");		// r3=0x33333333
	asm("eor	ip, r3, r3, lsl #1 ");		// ip=0x55555555

	asm("bic	r4, r0, ip ");				// r4=odd bits of input LSW
	asm("and	r0, r0, ip ");				// r0=even bits of input LSW
	asm("add	r0, r0, r4, lsr #1 ");		// r0[2n:2n+1] = in[2n]+in[2n+1], 0<=n<=15
	asm("bic	r4, r0, r3 ");				// r4 = r0[4n+2:4n+3] for 0<=n<=7, other bits 0
	asm("and	r0, r0, r3 ");				// r0 = r0[4n:4n+1] for 0<=n<=7, other bits 0
	asm("add	r0, r0, r4, lsr #2 ");		// r0 bits 4n:4n+3 = in[4n]+in[4n+1]+in[4n+2]+in[4n+3], 0<=n<=7

	asm("bic	r4, r1, ip ");				// r4=odd bits of input MSW
	asm("and	r1, r1, ip ");				// r1=even bits of input MSW
	asm("add	r1, r1, r4, lsr #1 ");		// r1[2n:2n+1] = in[2n+32]+in[2n+33], 0<=n<=15
	asm("bic	r4, r1, r3 ");				// r4 = r1[4n+34:4n+35] for 0<=n<=7, other bits 0
	asm("and	r1, r1, r3 ");				// r1 = r1[4n+32:4n+33] for 0<=n<=7, other bits 0
	asm("add	r1, r1, r4, lsr #2 ");		// r1 bits 4n:4n+3 = in[4n+32]+in[4n+33]+in[4n+34]+in[4n+35], 0<=n<=7
	asm("ldr	r4, [sp], #4 ");			// restore r4 (no longer needed)

	asm("add	r0, r0, r1 ");				// r0 bits 4n:4n+3 = in[4n]+in[4n+1]+in[4n+2]+in[4n+3]+in[4n+32]+in[4n+33]+in[4n+34]+in[4n+35], 0<=n<=7
	asm("bic	r1, r0, r2 ");				// odd nibbles only
	asm("and	r0, r0, r2 ");				// even nibbles only
	asm("add	r0, r0, r1, lsr #4 ");		// r0[8n:8n+7]=bit count of byte n of MSW + bit count of byte n of LSW
	asm("add	r0, r0, r0, lsr #8 ");		// r0[16n:16n+7]=bit count of hword n of MSW + bit count of hword n of LSW
	asm("add	r0, r0, r0, lsr #16 ");		// r0[0:7]=total bit count
	asm("and	r0, r0, #0xff ");			// mask other unwanted bits
	__JUMP(,	lr);
	}
sl@0
   335
sl@0
   336
sl@0
   337
sl@0
   338
/******************************************************************************
 * 64 bit operations
 *
 * Each __OP_xxx__ define selects one operation family; the included header
 * instantiates the corresponding __e32_atomic_* entry points for the current
 * __DATA_SIZE__ and undefines the __OP_xxx__ macro again.
 ******************************************************************************/
#define	__DATA_SIZE__ 64
#if defined(__CPU_ARM_HAS_LDREX_STREX_V6K) && !defined(__AVOID_READ_SIDE_EFFECTS__)

// Include LDREXD/STREXD-based 64 bit operations
#define	__OP_LOAD__
#include "atomic_64_v6k.h"
#define	__OP_STORE__
#include "atomic_64_v6k.h"
#define	__OP_SWP__
#include "atomic_64_v6k.h"
#define	__OP_CAS__
#include "atomic_64_v6k.h"
#define	__OP_ADD__
#include "atomic_64_v6k.h"
#define	__OP_AND__
#include "atomic_64_v6k.h"
#define	__OP_IOR__
#include "atomic_64_v6k.h"
#define	__OP_XOR__
#include "atomic_64_v6k.h"
#define	__OP_AXO__
#include "atomic_64_v6k.h"
#define	__OP_TAU__
#include "atomic_64_v6k.h"
#define	__OP_TAS__
#include "atomic_64_v6k.h"

#else
#ifdef __KERNEL_MODE__

// Include interrupt-disabling 64 bit operations
#define	__OP_LOAD__
#include "atomic_64_v6_v5.h"
#define	__OP_STORE__
#include "atomic_64_v6_v5.h"
#define	__OP_SWP__
#include "atomic_64_v6_v5.h"
#define	__OP_CAS__
#include "atomic_64_v6_v5.h"
#define	__OP_ADD__
#include "atomic_64_v6_v5.h"
#define	__OP_AND__
#include "atomic_64_v6_v5.h"
#define	__OP_IOR__
#include "atomic_64_v6_v5.h"
#define	__OP_XOR__
#include "atomic_64_v6_v5.h"
#define	__OP_AXO__
#include "atomic_64_v6_v5.h"
#define	__OP_TAU__
#include "atomic_64_v6_v5.h"
#define	__OP_TAS__
#include "atomic_64_v6_v5.h"

#else

// Include 64 bit operations using Exec calls
// (plain load/store do not need kernel help, so they still use the
// v6/v5 implementation; the read-modify-write operations go via Exec)
#define	__OP_LOAD__
#include "atomic_64_v6_v5.h"
#define	__OP_STORE__
#include "atomic_64_v6_v5.h"
#define	__OP_SWP__
#include "atomic_64_exec.h"
#define	__OP_CAS__
#include "atomic_64_exec.h"
#define	__OP_ADD__
#include "atomic_64_exec.h"
#define	__OP_AND__
#include "atomic_64_exec.h"
#define	__OP_IOR__
#include "atomic_64_exec.h"
#define	__OP_XOR__
#include "atomic_64_exec.h"
#define	__OP_AXO__
#include "atomic_64_exec.h"
#define	__OP_TAU__
#include "atomic_64_exec.h"
#define	__OP_TAS__
#include "atomic_64_exec.h"

#endif
#endif
#undef	__DATA_SIZE__
sl@0
   425
/******************************************************************************
 * 8,16,32 bit load/store operations
 *
 * Loads and stores of 32 bits or less never need kernel assistance, so the
 * V6 implementation is used for every size regardless of build type.
 ******************************************************************************/

#define	__DATA_SIZE__ 8
#define	__OP_LOAD__
#include "atomic_32_v6.h"
#define	__OP_STORE__
#include "atomic_32_v6.h"
#undef	__DATA_SIZE__

#define	__DATA_SIZE__ 16
#define	__OP_LOAD__
#include "atomic_32_v6.h"
#define	__OP_STORE__
#include "atomic_32_v6.h"
#undef	__DATA_SIZE__

#define	__DATA_SIZE__ 32
#define	__OP_LOAD__
#include "atomic_32_v6.h"
#define	__OP_STORE__
#include "atomic_32_v6.h"
#undef	__DATA_SIZE__
sl@0
   449
sl@0
   450
/******************************************************************************
 * 8,16,32 bit RMW operations
 *
 * Select an implementation header per operand size based on CPU features,
 * then instantiate every read-modify-write operation family for each size.
 ******************************************************************************/

#if defined(__CPU_ARM_HAS_LDREX_STREX_V6K) && !defined(__AVOID_READ_SIDE_EFFECTS__)
// V6K - Use variants of LDREX/STREX for everything
#define	__ATOMIC_8_IMPL__	"atomic_32_v6.h"
#define	__ATOMIC_16_IMPL__	"atomic_32_v6.h"
#define	__ATOMIC_32_IMPL__	"atomic_32_v6.h"
#elif defined(__CPU_ARM_HAS_LDREX_STREX) && !defined(__AVOID_READ_SIDE_EFFECTS__)
// V6 - Use LDREX/STREX for 32 bit operations
//		Use LDREX/STREX with shifts/rotates for 8/16 bit operations
#define	__ATOMIC_8_IMPL__	"atomic_8_16_v6.h"
#define	__ATOMIC_16_IMPL__	"atomic_8_16_v6.h"
#define	__ATOMIC_32_IMPL__	"atomic_32_v6.h"
#else
// V5 - Use interrupt disabling kernel side, Exec calls user side
#ifdef __KERNEL_MODE__
#define	__ATOMIC_8_IMPL__	"atomic_8_16_32_irq.h"
#define	__ATOMIC_16_IMPL__	"atomic_8_16_32_irq.h"
#define	__ATOMIC_32_IMPL__	"atomic_8_16_32_irq.h"
#else
#define	__ATOMIC_8_IMPL__	"atomic_8_16_32_exec.h"
#define	__ATOMIC_16_IMPL__	"atomic_8_16_32_exec.h"
#define	__ATOMIC_32_IMPL__	"atomic_8_16_32_exec.h"
#endif
#endif

#define	__DATA_SIZE__ 8
#define	__OP_SWP__
#include __ATOMIC_8_IMPL__
#define	__OP_CAS__
#include __ATOMIC_8_IMPL__
#define	__OP_ADD__
#include __ATOMIC_8_IMPL__
#define	__OP_AND__
#include __ATOMIC_8_IMPL__
#define	__OP_IOR__
#include __ATOMIC_8_IMPL__
#define	__OP_XOR__
#include __ATOMIC_8_IMPL__
#define	__OP_AXO__
#include __ATOMIC_8_IMPL__
#define	__OP_TAU__
#include __ATOMIC_8_IMPL__
#define	__OP_TAS__
#include __ATOMIC_8_IMPL__
#undef	__DATA_SIZE__

#define	__DATA_SIZE__ 16
#define	__OP_SWP__
#include __ATOMIC_16_IMPL__
#define	__OP_CAS__
#include __ATOMIC_16_IMPL__
#define	__OP_ADD__
#include __ATOMIC_16_IMPL__
#define	__OP_AND__
#include __ATOMIC_16_IMPL__
#define	__OP_IOR__
#include __ATOMIC_16_IMPL__
#define	__OP_XOR__
#include __ATOMIC_16_IMPL__
#define	__OP_AXO__
#include __ATOMIC_16_IMPL__
#define	__OP_TAU__
#include __ATOMIC_16_IMPL__
#define	__OP_TAS__
#include __ATOMIC_16_IMPL__
#undef	__DATA_SIZE__

#define	__DATA_SIZE__ 32
#define	__OP_SWP__
#include __ATOMIC_32_IMPL__
#define	__OP_CAS__
#include __ATOMIC_32_IMPL__
#define	__OP_ADD__
#include __ATOMIC_32_IMPL__
#define	__OP_AND__
#include __ATOMIC_32_IMPL__
#define	__OP_IOR__
#include __ATOMIC_32_IMPL__
#define	__OP_XOR__
#include __ATOMIC_32_IMPL__
#define	__OP_AXO__
#include __ATOMIC_32_IMPL__
#define	__OP_TAU__
#include __ATOMIC_32_IMPL__
#define	__OP_TAS__
#include __ATOMIC_32_IMPL__
#undef	__DATA_SIZE__
sl@0
   540