// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\atomic_skeleton.h
//
//

/**
 Read an 8/16/32 bit quantity with acquire semantics

 @param	a	Address of data to be read - must be naturally aligned
 @return		The value read
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_load_acq)(const volatile TAny* /*a*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
#ifdef __BARRIERS_NEEDED__
	_asm lock add dword ptr [esp], 0
#endif
	_asm ret
	}
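
/* Illustrative sketch only (editorial, not part of the original header): a rough C
   equivalent of the load above, with T standing for the 8/16/32 bit type selected by
   __TUintX__. On x86 an aligned load already has acquire semantics; the
   "lock add dword ptr [esp], 0" is a dummy locked read-modify-write on the caller's
   stack slot, used as a cheap full fence on SMP builds (__BARRIERS_NEEDED__).

	static inline T load_acq_sketch(const volatile T* a)
		{
		T v = *a;					// aligned load; acquire ordering on x86
	#ifdef __BARRIERS_NEEDED__
		full_memory_barrier();		// hypothetical helper standing in for the locked add
	#endif
		return v;
		}
*/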


/** Write an 8/16/32 bit quantity with release semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_store_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __D_REG__, [esp+8]
	_asm mov __A_REG__, __D_REG__
	_asm __LOCK__ xchg [ecx], __D_REG__
	_asm ret
	}
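
/* Illustrative sketch only (editorial, not part of the original header): a rough C
   picture of the store above, with T standing for the __TUintX__ type. xchg with a
   memory operand is implicitly locked on x86, so this store is in fact a full
   barrier; the written value is returned in __A_REG__.

	static inline T store_rel_sketch(volatile T* a, T v)
		{
		atomic_swap(a, v);	// hypothetical helper; the exchanged-out old value is discarded
		return v;
		}
*/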


/** Write an 8/16/32 bit quantity with full barrier semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_store_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_store_rel)
	}
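
/* Note (editorial, not part of the original header): on x86 ordinary stores already
   have release semantics and locked instructions act as full barriers, so the weaker
   ordering variants in this skeleton are simply tail-jumps to the strongest
   implementation. The pattern, in rough C (names hypothetical):

	static inline T store_ord_sketch(volatile T* a, T v)
		{
		return store_rel_sketch(a, v);	// same code path; x86 supplies the stronger ordering
		}
*/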


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Relaxed ordering.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Acquire semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Release semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Full barrier semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_swp_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [esp+8]
	_asm __LOCK__ xchg [ecx], __A_REG__
	_asm ret
	}
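
/* Illustrative sketch only (editorial, not part of the original header): a typical
   use of the atomic swap is a test-and-set spin lock. The sketch below assumes the
   32-bit instantiations generated from this skeleton (__e32_atomic_swp_acq32,
   __e32_atomic_store_rel32); the spin_lock/spin_unlock names are hypothetical.

	static inline void spin_lock(volatile TUint32* aFlag)
		{
		while (__e32_atomic_swp_acq32(aFlag, 1))
			{}								// spin until the old value was 0 (lock was free)
		}

	static inline void spin_unlock(volatile TUint32* aFlag)
		{
		__e32_atomic_store_rel32(aFlag, 0);	// release the lock
		}
*/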


/** 8/16/32 bit compare and swap, relaxed ordering.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_rlx)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}


/** 8/16/32 bit compare and swap, acquire semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_acq)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}


/** 8/16/32 bit compare and swap, release semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_rel)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}


/** 8/16/32 bit compare and swap, full barrier semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool		__fname__(__e32_atomic_cas_ord)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov eax, [esp+8]
	_asm mov __D_REG__, [esp+12]
	_asm mov __A_REG__, [eax]
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short cas_fail
	_asm mov eax, 1
	_asm ret
	_asm cas_fail:
	_asm mov edx, [esp+8]
	_asm mov [edx], __A_REG__
	_asm xor eax, eax
	_asm ret
	}
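
/* Illustrative sketch only (editorial, not part of the original header): compare and
   swap is the usual building block for arbitrary read-modify-write operations. A
   hypothetical atomic_max on a TUint32, written against the 32-bit instantiation
   __e32_atomic_cas_ord32 generated from this skeleton, might look roughly like this:

	static inline TUint32 atomic_max_sketch(volatile TUint32* a, TUint32 v)
		{
		TUint32 old = *a;
		// On failure the CAS writes the current value of *a back into 'old',
		// so each retry uses a fresh expected value.
		while (old < v && !__e32_atomic_cas_ord32(a, &old, v))
			{}
		return old;
		}
*/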


/** 8/16/32 bit atomic add, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}


/** 8/16/32 bit atomic add, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}


/** 8/16/32 bit atomic add, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}


/** 8/16/32 bit atomic add, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_add_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [esp+8]
	_asm __LOCK__ xadd [ecx], __A_REG__
	_asm ret
	}
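
/* Illustrative sketch only (editorial, not part of the original header): lock xadd
   exchanges the register with memory and adds, so the old value comes back in
   __A_REG__ as the return value. The semantics in rough C (T standing for the
   __TUintX__ type; the hardware performs this read-modify-write as one atomic step):

	static inline T add_ord_sketch(volatile T* a, T v)
		{
		T oldv = *a;
		*a = (T)(oldv + v);
		return oldv;
		}
*/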


/** 8/16/32 bit atomic bitwise logical AND, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}


/** 8/16/32 bit atomic bitwise logical AND, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}


/** 8/16/32 bit atomic bitwise logical AND, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}


/** 8/16/32 bit atomic bitwise logical AND, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_and_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm and __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
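
/* Illustrative sketch only (editorial, not part of the original header): the AND
   implementation above and the OR/XOR/AXO implementations that follow all use the
   same cmpxchg retry loop. In rough C, with T standing for the __TUintX__ type and
   cas_sketch standing in for lock cmpxchg (which also refreshes 'oldv' with the
   current memory value when it fails):

	static inline T and_ord_sketch(volatile T* a, T v)
		{
		T oldv = *a;
		T newv;
		do	{
			newv = (T)(oldv & v);
			} while (!cas_sketch(a, &oldv, newv));	// hypothetical CAS helper
		return oldv;
		}
*/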


/** 8/16/32 bit atomic bitwise logical inclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_ior_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm or __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_xor_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm xor __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}


/** 8/16/32 bit atomic bitwise universal function, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_rlx)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}


/** 8/16/32 bit atomic bitwise universal function, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_acq)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}


/** 8/16/32 bit atomic bitwise universal function, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_rel)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}


/** 8/16/32 bit atomic bitwise universal function, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_axo_ord)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+8]
	_asm and __D_REG__, __A_REG__
	_asm xor __D_REG__, [esp+12]
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
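
/* Note (editorial, not part of the original header): the "universal" operation
   newv = (oldv & u) ^ v covers the three simple bitwise operations as special cases:

	and:  axo(a, v, 0)   ->  (oldv & v)  ^ 0  ==  oldv & v
	ior:  axo(a, ~v, v)  ->  (oldv & ~v) ^ v  ==  oldv | v
	xor:  axo(a, ~0, v)  ->  (oldv & ~0) ^ v  ==  oldv ^ v
*/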


/** 8/16/32 bit threshold and add, unsigned, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_rlx)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}


/** 8/16/32 bit threshold and add, unsigned, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_acq)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}


/** 8/16/32 bit threshold and add, unsigned, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_rel)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}


/** 8/16/32 bit threshold and add, unsigned, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__	__fname__(__e32_atomic_tau_ord)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+12]
	_asm cmp __A_REG__, [esp+8]
	_asm jae short use_u
	_asm mov __D_REG__, [esp+16]
	_asm use_u:
	_asm add __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
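
/* Illustrative sketch only (editorial, not part of the original header): in rough C,
   with T standing for the unsigned __TUintX__ type, the threshold-and-add above
   behaves as follows; the real code performs the update atomically via the cmpxchg
   retry loop.

	static inline T tau_ord_sketch(volatile T* a, T t, T u, T v)
		{
		T oldv = *a;
		*a = (T)(oldv + ((oldv >= t) ? u : v));	// unsigned compare (jae)
		return oldv;
		}
*/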


/** 8/16/32 bit threshold and add, signed, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_rlx)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}


/** 8/16/32 bit threshold and add, signed, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_acq)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}


/** 8/16/32 bit threshold and add, signed, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_rel)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}


/** 8/16/32 bit threshold and add, signed, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__	__fname__(__e32_atomic_tas_ord)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	_asm retry:
	_asm mov __D_REG__, [esp+12]
	_asm cmp __A_REG__, [esp+8]
	_asm jge short use_u
	_asm mov __D_REG__, [esp+16]
	_asm use_u:
	_asm add __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
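
/* Note (editorial, not part of the original header): the signed form above differs
   from the unsigned threshold-and-add only in the comparison; jge (signed >=) is
   used instead of jae (unsigned >=), matching __TIntX__ being a signed type. In the
   C sketch the update line becomes:

		*a = (T)(oldv + ((oldv >= t) ? u : v));	// with T signed, so the compare is signed
*/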