/*
* Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
* All rights reserved.
* This component and the accompanying materials are made available
* under the terms of the License "Eclipse Public License v1.0"
* which accompanies this distribution, and is available
* at the URL "http://www.eclipse.org/legal/epl-v10.html".
*
* Initial Contributors:
* Nokia Corporation - initial contribution.
*
* Contributors:
*
* Description:
* e32/include/e32atomics.h
* 
*
*/



#ifndef __E32ATOMICS_H__
#define __E32ATOMICS_H__
#include <e32def.h>

/**	@file e32atomics.h
	@publishedAll
	@prototype
*/


/*
Versions needed:
	WINS/WINSCW		Use X86 locked operations. Assume Pentium or above CPU (CMPXCHG8B available)
	X86				For Pentium and above use locked operations
					For 486 use locked operations for 8, 16, 32 bit. For 64 bit must disable interrupts.
					NOTE: 486 not supported at the moment
	ARMv4/ARMv5		Must disable interrupts.
	ARMv6			LDREX/STREX for 8, 16, 32 bit. For 64 bit must disable interrupts (maybe).
	ARMv6K/ARMv7	LDREXB/LDREXH/LDREX/LDREXD
*/

#ifdef __cplusplus
extern "C" {
#endif

IMPORT_C void		__e32_memory_barrier();												/* Barrier guaranteeing ordering of memory accesses */
IMPORT_C void		__e32_io_completion_barrier();										/* Barrier guaranteeing ordering and completion of memory accesses */

/* Atomic operations on 8 bit quantities */
IMPORT_C TUint8		__e32_atomic_load_acq8(const volatile TAny* a);						/* read 8 bit acquire semantics */
IMPORT_C TUint8		__e32_atomic_store_rel8(volatile TAny* a, TUint8 v);				/* write 8 bit, return v, release semantics */
IMPORT_C TUint8		__e32_atomic_store_ord8(volatile TAny* a, TUint8 v);				/* write 8 bit, return v, full fence */
IMPORT_C TUint8		__e32_atomic_swp_rlx8(volatile TAny* a, TUint8 v);					/* write 8 bit, return original, relaxed */
IMPORT_C TUint8		__e32_atomic_swp_acq8(volatile TAny* a, TUint8 v);					/* write 8 bit, return original, acquire */
IMPORT_C TUint8		__e32_atomic_swp_rel8(volatile TAny* a, TUint8 v);					/* write 8 bit, return original, release */
IMPORT_C TUint8		__e32_atomic_swp_ord8(volatile TAny* a, TUint8 v);					/* write 8 bit, return original, full fence */
IMPORT_C TBool		__e32_atomic_cas_rlx8(volatile TAny* a, TUint8* q, TUint8 v);		/* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool		__e32_atomic_cas_acq8(volatile TAny* a, TUint8* q, TUint8 v);
IMPORT_C TBool		__e32_atomic_cas_rel8(volatile TAny* a, TUint8* q, TUint8 v);
IMPORT_C TBool		__e32_atomic_cas_ord8(volatile TAny* a, TUint8* q, TUint8 v);
IMPORT_C TUint8		__e32_atomic_add_rlx8(volatile TAny* a, TUint8 v);					/* *a += v; return original *a; */
IMPORT_C TUint8		__e32_atomic_add_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_add_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_add_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_and_rlx8(volatile TAny* a, TUint8 v);					/* *a &= v; return original *a; */
IMPORT_C TUint8		__e32_atomic_and_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_and_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_and_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_ior_rlx8(volatile TAny* a, TUint8 v);					/* *a |= v; return original *a; */
IMPORT_C TUint8		__e32_atomic_ior_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_ior_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_ior_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_xor_rlx8(volatile TAny* a, TUint8 v);					/* *a ^= v; return original *a; */
IMPORT_C TUint8		__e32_atomic_xor_acq8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_xor_rel8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_xor_ord8(volatile TAny* a, TUint8 v);
IMPORT_C TUint8		__e32_atomic_axo_rlx8(volatile TAny* a, TUint8 u, TUint8 v);		/* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint8		__e32_atomic_axo_acq8(volatile TAny* a, TUint8 u, TUint8 v);
IMPORT_C TUint8		__e32_atomic_axo_rel8(volatile TAny* a, TUint8 u, TUint8 v);
IMPORT_C TUint8		__e32_atomic_axo_ord8(volatile TAny* a, TUint8 u, TUint8 v);
IMPORT_C TUint8		__e32_atomic_tau_rlx8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint8		__e32_atomic_tau_acq8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
IMPORT_C TUint8		__e32_atomic_tau_rel8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
IMPORT_C TUint8		__e32_atomic_tau_ord8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
IMPORT_C TInt8		__e32_atomic_tas_rlx8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt8		__e32_atomic_tas_acq8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
IMPORT_C TInt8		__e32_atomic_tas_rel8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
IMPORT_C TInt8		__e32_atomic_tas_ord8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);

/* Atomic operations on 16 bit quantities */
IMPORT_C TUint16	__e32_atomic_load_acq16(const volatile TAny* a);					/* read 16 bit acquire semantics */
IMPORT_C TUint16	__e32_atomic_store_rel16(volatile TAny* a, TUint16 v);				/* write 16 bit, return v, release semantics */
IMPORT_C TUint16	__e32_atomic_store_ord16(volatile TAny* a, TUint16 v);				/* write 16 bit, return v, full fence */
IMPORT_C TUint16	__e32_atomic_swp_rlx16(volatile TAny* a, TUint16 v);				/* write 16 bit, return original, relaxed */
IMPORT_C TUint16	__e32_atomic_swp_acq16(volatile TAny* a, TUint16 v);				/* write 16 bit, return original, acquire */
IMPORT_C TUint16	__e32_atomic_swp_rel16(volatile TAny* a, TUint16 v);				/* write 16 bit, return original, release */
IMPORT_C TUint16	__e32_atomic_swp_ord16(volatile TAny* a, TUint16 v);				/* write 16 bit, return original, full fence */
IMPORT_C TBool		__e32_atomic_cas_rlx16(volatile TAny* a, TUint16* q, TUint16 v);	/* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool		__e32_atomic_cas_acq16(volatile TAny* a, TUint16* q, TUint16 v);
IMPORT_C TBool		__e32_atomic_cas_rel16(volatile TAny* a, TUint16* q, TUint16 v);
IMPORT_C TBool		__e32_atomic_cas_ord16(volatile TAny* a, TUint16* q, TUint16 v);
IMPORT_C TUint16	__e32_atomic_add_rlx16(volatile TAny* a, TUint16 v);				/* *a += v; return original *a; */
IMPORT_C TUint16	__e32_atomic_add_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_add_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_add_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_and_rlx16(volatile TAny* a, TUint16 v);				/* *a &= v; return original *a; */
IMPORT_C TUint16	__e32_atomic_and_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_and_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_and_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_ior_rlx16(volatile TAny* a, TUint16 v);				/* *a |= v; return original *a; */
IMPORT_C TUint16	__e32_atomic_ior_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_ior_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_ior_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_xor_rlx16(volatile TAny* a, TUint16 v);				/* *a ^= v; return original *a; */
IMPORT_C TUint16	__e32_atomic_xor_acq16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_xor_rel16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_xor_ord16(volatile TAny* a, TUint16 v);
IMPORT_C TUint16	__e32_atomic_axo_rlx16(volatile TAny* a, TUint16 u, TUint16 v);		/* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint16	__e32_atomic_axo_acq16(volatile TAny* a, TUint16 u, TUint16 v);
IMPORT_C TUint16	__e32_atomic_axo_rel16(volatile TAny* a, TUint16 u, TUint16 v);
IMPORT_C TUint16	__e32_atomic_axo_ord16(volatile TAny* a, TUint16 u, TUint16 v);
IMPORT_C TUint16	__e32_atomic_tau_rlx16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint16	__e32_atomic_tau_acq16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
IMPORT_C TUint16	__e32_atomic_tau_rel16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
IMPORT_C TUint16	__e32_atomic_tau_ord16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
IMPORT_C TInt16		__e32_atomic_tas_rlx16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt16		__e32_atomic_tas_acq16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
IMPORT_C TInt16		__e32_atomic_tas_rel16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
IMPORT_C TInt16		__e32_atomic_tas_ord16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);

/* Atomic operations on 32 bit quantities */
IMPORT_C TUint32	__e32_atomic_load_acq32(const volatile TAny* a);					/* read 32 bit acquire semantics */
IMPORT_C TUint32	__e32_atomic_store_rel32(volatile TAny* a, TUint32 v);				/* write 32 bit, return v, release semantics */
IMPORT_C TUint32	__e32_atomic_store_ord32(volatile TAny* a, TUint32 v);				/* write 32 bit, return v, full fence */
IMPORT_C TUint32	__e32_atomic_swp_rlx32(volatile TAny* a, TUint32 v);				/* write 32 bit, return original, relaxed */
IMPORT_C TUint32	__e32_atomic_swp_acq32(volatile TAny* a, TUint32 v);				/* write 32 bit, return original, acquire */
IMPORT_C TUint32	__e32_atomic_swp_rel32(volatile TAny* a, TUint32 v);				/* write 32 bit, return original, release */
IMPORT_C TUint32	__e32_atomic_swp_ord32(volatile TAny* a, TUint32 v);				/* write 32 bit, return original, full fence */
IMPORT_C TBool		__e32_atomic_cas_rlx32(volatile TAny* a, TUint32* q, TUint32 v);	/* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool		__e32_atomic_cas_acq32(volatile TAny* a, TUint32* q, TUint32 v);
IMPORT_C TBool		__e32_atomic_cas_rel32(volatile TAny* a, TUint32* q, TUint32 v);
IMPORT_C TBool		__e32_atomic_cas_ord32(volatile TAny* a, TUint32* q, TUint32 v);
IMPORT_C TUint32	__e32_atomic_add_rlx32(volatile TAny* a, TUint32 v);				/* *a += v; return original *a; */
IMPORT_C TUint32	__e32_atomic_add_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_add_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_add_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_and_rlx32(volatile TAny* a, TUint32 v);				/* *a &= v; return original *a; */
IMPORT_C TUint32	__e32_atomic_and_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_and_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_and_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_ior_rlx32(volatile TAny* a, TUint32 v);				/* *a |= v; return original *a; */
IMPORT_C TUint32	__e32_atomic_ior_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_ior_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_ior_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_xor_rlx32(volatile TAny* a, TUint32 v);				/* *a ^= v; return original *a; */
IMPORT_C TUint32	__e32_atomic_xor_acq32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_xor_rel32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_xor_ord32(volatile TAny* a, TUint32 v);
IMPORT_C TUint32	__e32_atomic_axo_rlx32(volatile TAny* a, TUint32 u, TUint32 v);		/* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint32	__e32_atomic_axo_acq32(volatile TAny* a, TUint32 u, TUint32 v);
IMPORT_C TUint32	__e32_atomic_axo_rel32(volatile TAny* a, TUint32 u, TUint32 v);
IMPORT_C TUint32	__e32_atomic_axo_ord32(volatile TAny* a, TUint32 u, TUint32 v);
IMPORT_C TUint32	__e32_atomic_tau_rlx32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint32	__e32_atomic_tau_acq32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
IMPORT_C TUint32	__e32_atomic_tau_rel32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
IMPORT_C TUint32	__e32_atomic_tau_ord32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
IMPORT_C TInt32		__e32_atomic_tas_rlx32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt32		__e32_atomic_tas_acq32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
IMPORT_C TInt32		__e32_atomic_tas_rel32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
IMPORT_C TInt32		__e32_atomic_tas_ord32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);

/* Atomic operations on 64 bit quantities */
IMPORT_C TUint64	__e32_atomic_load_acq64(const volatile TAny* a);					/* read 64 bit acquire semantics */
IMPORT_C TUint64	__e32_atomic_store_rel64(volatile TAny* a, TUint64 v);				/* write 64 bit, return v, release semantics */
IMPORT_C TUint64	__e32_atomic_store_ord64(volatile TAny* a, TUint64 v);				/* write 64 bit, return v, full fence */
IMPORT_C TUint64	__e32_atomic_swp_rlx64(volatile TAny* a, TUint64 v);				/* write 64 bit, return original, relaxed */
IMPORT_C TUint64	__e32_atomic_swp_acq64(volatile TAny* a, TUint64 v);				/* write 64 bit, return original, acquire */
IMPORT_C TUint64	__e32_atomic_swp_rel64(volatile TAny* a, TUint64 v);				/* write 64 bit, return original, release */
IMPORT_C TUint64	__e32_atomic_swp_ord64(volatile TAny* a, TUint64 v);				/* write 64 bit, return original, full fence */
IMPORT_C TBool		__e32_atomic_cas_rlx64(volatile TAny* a, TUint64* q, TUint64 v);	/* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
IMPORT_C TBool		__e32_atomic_cas_acq64(volatile TAny* a, TUint64* q, TUint64 v);
IMPORT_C TBool		__e32_atomic_cas_rel64(volatile TAny* a, TUint64* q, TUint64 v);
IMPORT_C TBool		__e32_atomic_cas_ord64(volatile TAny* a, TUint64* q, TUint64 v);
IMPORT_C TUint64	__e32_atomic_add_rlx64(volatile TAny* a, TUint64 v);				/* *a += v; return original *a; */
IMPORT_C TUint64	__e32_atomic_add_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_add_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_add_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_and_rlx64(volatile TAny* a, TUint64 v);				/* *a &= v; return original *a; */
IMPORT_C TUint64	__e32_atomic_and_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_and_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_and_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_ior_rlx64(volatile TAny* a, TUint64 v);				/* *a |= v; return original *a; */
IMPORT_C TUint64	__e32_atomic_ior_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_ior_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_ior_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_xor_rlx64(volatile TAny* a, TUint64 v);				/* *a ^= v; return original *a; */
IMPORT_C TUint64	__e32_atomic_xor_acq64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_xor_rel64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_xor_ord64(volatile TAny* a, TUint64 v);
IMPORT_C TUint64	__e32_atomic_axo_rlx64(volatile TAny* a, TUint64 u, TUint64 v);		/* *a = (*a & u) ^ v; return original *a; */
IMPORT_C TUint64	__e32_atomic_axo_acq64(volatile TAny* a, TUint64 u, TUint64 v);
IMPORT_C TUint64	__e32_atomic_axo_rel64(volatile TAny* a, TUint64 u, TUint64 v);
IMPORT_C TUint64	__e32_atomic_axo_ord64(volatile TAny* a, TUint64 u, TUint64 v);
IMPORT_C TUint64	__e32_atomic_tau_rlx64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TUint64	__e32_atomic_tau_acq64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
IMPORT_C TUint64	__e32_atomic_tau_rel64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
IMPORT_C TUint64	__e32_atomic_tau_ord64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
IMPORT_C TInt64		__e32_atomic_tas_rlx64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);	/* if (*a>=t) *a+=u else *a+=v; return original *a; */
IMPORT_C TInt64		__e32_atomic_tas_acq64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
IMPORT_C TInt64		__e32_atomic_tas_rel64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
IMPORT_C TInt64		__e32_atomic_tas_ord64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);

/*	Atomic operations on pointers
	These are implemented as macro definitions over the 32 or 64 bit operations
*/
/*	IMPORT_C TAny*		__e32_atomic_load_acq_ptr(const volatile TAny* a);												*/
#define	__e32_atomic_load_acq_ptr(a)		((TAny*)__e32_atomic_load_acq32(a))
/*	IMPORT_C TAny*		__e32_atomic_store_rel_ptr(volatile TAny* a, const volatile TAny* v);							*/
#define	__e32_atomic_store_rel_ptr(a,v)		((TAny*)__e32_atomic_store_rel32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_store_ord_ptr(volatile TAny* a, const volatile TAny* v);							*/
#define	__e32_atomic_store_ord_ptr(a,v)		((TAny*)__e32_atomic_store_ord32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_swp_rlx_ptr(volatile TAny* a, const volatile TAny* v);								*/
#define	__e32_atomic_swp_rlx_ptr(a,v)		((TAny*)__e32_atomic_swp_rlx32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_swp_acq_ptr(volatile TAny* a, const volatile TAny* v);								*/
#define	__e32_atomic_swp_acq_ptr(a,v)		((TAny*)__e32_atomic_swp_acq32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_swp_rel_ptr(volatile TAny* a, const volatile TAny* v);								*/
#define	__e32_atomic_swp_rel_ptr(a,v)		((TAny*)__e32_atomic_swp_rel32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_swp_ord_ptr(volatile TAny* a, const volatile TAny* v);								*/
#define	__e32_atomic_swp_ord_ptr(a,v)		((TAny*)__e32_atomic_swp_ord32(a,(T_UintPtr)(v)))
/*	IMPORT_C TBool		__e32_atomic_cas_rlx_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v);	*/
#define	__e32_atomic_cas_rlx_ptr(a,q,v)		(__e32_atomic_cas_rlx32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/*	IMPORT_C TBool		__e32_atomic_cas_acq_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v);	*/
#define	__e32_atomic_cas_acq_ptr(a,q,v)		(__e32_atomic_cas_acq32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/*	IMPORT_C TBool		__e32_atomic_cas_rel_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v);	*/
#define	__e32_atomic_cas_rel_ptr(a,q,v)		(__e32_atomic_cas_rel32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/*	IMPORT_C TBool		__e32_atomic_cas_ord_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v);	*/
#define	__e32_atomic_cas_ord_ptr(a,q,v)		(__e32_atomic_cas_ord32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_add_rlx_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_add_rlx_ptr(a,v)		((TAny*)__e32_atomic_add_rlx32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_add_acq_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_add_acq_ptr(a,v)		((TAny*)__e32_atomic_add_acq32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_add_rel_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_add_rel_ptr(a,v)		((TAny*)__e32_atomic_add_rel32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_add_ord_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_add_ord_ptr(a,v)		((TAny*)__e32_atomic_add_ord32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_and_rlx_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_and_rlx_ptr(a,v)		((TAny*)__e32_atomic_and_rlx32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_and_acq_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_and_acq_ptr(a,v)		((TAny*)__e32_atomic_and_acq32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_and_rel_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_and_rel_ptr(a,v)		((TAny*)__e32_atomic_and_rel32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_and_ord_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_and_ord_ptr(a,v)		((TAny*)__e32_atomic_and_ord32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_ior_rlx_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_ior_rlx_ptr(a,v)		((TAny*)__e32_atomic_ior_rlx32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_ior_acq_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_ior_acq_ptr(a,v)		((TAny*)__e32_atomic_ior_acq32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_ior_rel_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_ior_rel_ptr(a,v)		((TAny*)__e32_atomic_ior_rel32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_ior_ord_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_ior_ord_ptr(a,v)		((TAny*)__e32_atomic_ior_ord32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_xor_rlx_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_xor_rlx_ptr(a,v)		((TAny*)__e32_atomic_xor_rlx32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_xor_acq_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_xor_acq_ptr(a,v)		((TAny*)__e32_atomic_xor_acq32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_xor_rel_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_xor_rel_ptr(a,v)		((TAny*)__e32_atomic_xor_rel32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_xor_ord_ptr(volatile TAny* a, T_UintPtr v);										*/
#define	__e32_atomic_xor_ord_ptr(a,v)		((TAny*)__e32_atomic_xor_ord32(a,(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_axo_rlx_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v);							*/
#define	__e32_atomic_axo_rlx_ptr(a,u,v)		((TAny*)__e32_atomic_axo_rlx32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_axo_acq_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v);							*/
#define	__e32_atomic_axo_acq_ptr(a,u,v)		((TAny*)__e32_atomic_axo_acq32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_axo_rel_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v);							*/
#define	__e32_atomic_axo_rel_ptr(a,u,v)		((TAny*)__e32_atomic_axo_rel32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_axo_ord_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v);							*/
#define	__e32_atomic_axo_ord_ptr(a,u,v)		((TAny*)__e32_atomic_axo_ord32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_tau_rlx_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v);	*/
#define	__e32_atomic_tau_rlx_ptr(a,t,u,v)	((TAny*)__e32_atomic_tau_rlx32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_tau_acq_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v);	*/
#define	__e32_atomic_tau_acq_ptr(a,t,u,v)	((TAny*)__e32_atomic_tau_acq32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_tau_rel_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v);	*/
#define	__e32_atomic_tau_rel_ptr(a,t,u,v)	((TAny*)__e32_atomic_tau_rel32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
/*	IMPORT_C TAny*		__e32_atomic_tau_ord_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v);	*/
#define	__e32_atomic_tau_ord_ptr(a,t,u,v)	((TAny*)__e32_atomic_tau_ord32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))

/*	Miscellaneous utility functions
*/
IMPORT_C TInt		__e32_find_ms1_32(TUint32 v);		/* return bit number of most significant 1, -1 if argument zero */
IMPORT_C TInt		__e32_find_ls1_32(TUint32 v);		/* return bit number of least significant 1, -1 if argument zero */
IMPORT_C TInt		__e32_bit_count_32(TUint32 v);		/* return number of bits with value 1 */
IMPORT_C TInt		__e32_find_ms1_64(TUint64 v);		/* return bit number of most significant 1, -1 if argument zero */
IMPORT_C TInt		__e32_find_ls1_64(TUint64 v);		/* return bit number of least significant 1, -1 if argument zero */
IMPORT_C TInt		__e32_bit_count_64(TUint64 v);		/* return number of bits with value 1 */

#ifdef __cplusplus
} /* extern "C" */
#endif


#endif	/* __E32ATOMICS_H__ */