--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/include/e32atomics.h	Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,302 @@
+/*
+* Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
+* All rights reserved.
+* This component and the accompanying materials are made available
+* under the terms of the License "Eclipse Public License v1.0"
+* which accompanies this distribution, and is available
+* at the URL "http://www.eclipse.org/legal/epl-v10.html".
+*
+* Initial Contributors:
+* Nokia Corporation - initial contribution.
+*
+* Contributors:
+*
+* Description:
+* e32/include/e32atomics.h
+*
+*
+*/
+
+
+
+#ifndef __E32ATOMICS_H__
+#define __E32ATOMICS_H__
+#include <e32def.h>
+
+/** @file e32atomics.h
+	@publishedAll
+	@prototype
+*/
+
+
+/*
+Versions needed:
+	WINS/WINSCW	Use X86 locked operations. Assume Pentium or above CPU (CMPXCHG8B available)
+	X86		For Pentium and above use locked operations
+			For 486 use locked operations for 8, 16, 32 bit. For 64 bit must disable interrupts.
+			NOTE: 486 not supported at the moment
+	ARMv4/ARMv5	Must disable interrupts.
+	ARMv6		LDREX/STREX for 8, 16, 32 bit. For 64 bit must disable interrupts (LDREXD/STREXD not available until ARMv6K).
+	ARMv6K/ARMv7	LDREXB/LDREXH/LDREX/LDREXD
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+IMPORT_C void __e32_memory_barrier(); /* Barrier guaranteeing ordering of memory accesses */
+IMPORT_C void __e32_io_completion_barrier(); /* Barrier guaranteeing ordering and completion of memory accesses */
+
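+/* Example (illustrative sketch): a writer fills shared data and then sets a
+   flag; the barrier keeps the data write visible before the flag write, and
+   an acquire load on the reader side pairs with it. The names iBuf and
+   iReady are placeholders, not part of this API.
+
+	iBuf[0] = value;                           // writer: fill shared data
+	__e32_memory_barrier();                    // order data before flag
+	iReady = 1;                                // publish
+
+	while (!__e32_atomic_load_acq32(&iReady))  // reader: spin until published
+		{}
+	UseValue(iBuf[0]);                         // guaranteed to see the data
+*/
+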
+/* Atomic operations on 8 bit quantities */
+IMPORT_C TUint8 __e32_atomic_load_acq8(const volatile TAny* a); /* read 8 bit acquire semantics */
+IMPORT_C TUint8 __e32_atomic_store_rel8(volatile TAny* a, TUint8 v); /* write 8 bit, return v, release semantics */
+IMPORT_C TUint8 __e32_atomic_store_ord8(volatile TAny* a, TUint8 v); /* write 8 bit, return v, full fence */
+IMPORT_C TUint8 __e32_atomic_swp_rlx8(volatile TAny* a, TUint8 v); /* write 8 bit, return original, relaxed */
+IMPORT_C TUint8 __e32_atomic_swp_acq8(volatile TAny* a, TUint8 v); /* write 8 bit, return original, acquire */
+IMPORT_C TUint8 __e32_atomic_swp_rel8(volatile TAny* a, TUint8 v); /* write 8 bit, return original, release */
+IMPORT_C TUint8 __e32_atomic_swp_ord8(volatile TAny* a, TUint8 v); /* write 8 bit, return original, full fence */
+IMPORT_C TBool __e32_atomic_cas_rlx8(volatile TAny* a, TUint8* q, TUint8 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
+IMPORT_C TBool __e32_atomic_cas_acq8(volatile TAny* a, TUint8* q, TUint8 v);
+IMPORT_C TBool __e32_atomic_cas_rel8(volatile TAny* a, TUint8* q, TUint8 v);
+IMPORT_C TBool __e32_atomic_cas_ord8(volatile TAny* a, TUint8* q, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_add_rlx8(volatile TAny* a, TUint8 v); /* *a += v; return original *a; */
+IMPORT_C TUint8 __e32_atomic_add_acq8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_add_rel8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_add_ord8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_and_rlx8(volatile TAny* a, TUint8 v); /* *a &= v; return original *a; */
+IMPORT_C TUint8 __e32_atomic_and_acq8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_and_rel8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_and_ord8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_ior_rlx8(volatile TAny* a, TUint8 v); /* *a |= v; return original *a; */
+IMPORT_C TUint8 __e32_atomic_ior_acq8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_ior_rel8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_ior_ord8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_xor_rlx8(volatile TAny* a, TUint8 v); /* *a ^= v; return original *a; */
+IMPORT_C TUint8 __e32_atomic_xor_acq8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_xor_rel8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_xor_ord8(volatile TAny* a, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_axo_rlx8(volatile TAny* a, TUint8 u, TUint8 v); /* *a = (*a & u) ^ v; return original *a; */
+IMPORT_C TUint8 __e32_atomic_axo_acq8(volatile TAny* a, TUint8 u, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_axo_rel8(volatile TAny* a, TUint8 u, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_axo_ord8(volatile TAny* a, TUint8 u, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_tau_rlx8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
+IMPORT_C TUint8 __e32_atomic_tau_acq8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_tau_rel8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
+IMPORT_C TUint8 __e32_atomic_tau_ord8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v);
+IMPORT_C TInt8 __e32_atomic_tas_rlx8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; (signed comparison) */
+IMPORT_C TInt8 __e32_atomic_tas_acq8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
+IMPORT_C TInt8 __e32_atomic_tas_rel8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
+IMPORT_C TInt8 __e32_atomic_tas_ord8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v);
+
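+/* Example (illustrative sketch): tau performs a conditional add in a single
+   atomic step. With t=1, u=1, v=0 it increments only when the value is
+   already non-zero, e.g. taking a reference only while a count is live.
+   iRefs is a placeholder shared count, not part of this API.
+
+	TUint8 old = __e32_atomic_tau_acq8(&iRefs, 1, 1, 0);
+	if (old == 0)
+		{
+		// count was zero: no reference taken, object already dying
+		}
+*/
+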
+/* Atomic operations on 16 bit quantities */
+IMPORT_C TUint16 __e32_atomic_load_acq16(const volatile TAny* a); /* read 16 bit acquire semantics */
+IMPORT_C TUint16 __e32_atomic_store_rel16(volatile TAny* a, TUint16 v); /* write 16 bit, return v, release semantics */
+IMPORT_C TUint16 __e32_atomic_store_ord16(volatile TAny* a, TUint16 v); /* write 16 bit, return v, full fence */
+IMPORT_C TUint16 __e32_atomic_swp_rlx16(volatile TAny* a, TUint16 v); /* write 16 bit, return original, relaxed */
+IMPORT_C TUint16 __e32_atomic_swp_acq16(volatile TAny* a, TUint16 v); /* write 16 bit, return original, acquire */
+IMPORT_C TUint16 __e32_atomic_swp_rel16(volatile TAny* a, TUint16 v); /* write 16 bit, return original, release */
+IMPORT_C TUint16 __e32_atomic_swp_ord16(volatile TAny* a, TUint16 v); /* write 16 bit, return original, full fence */
+IMPORT_C TBool __e32_atomic_cas_rlx16(volatile TAny* a, TUint16* q, TUint16 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
+IMPORT_C TBool __e32_atomic_cas_acq16(volatile TAny* a, TUint16* q, TUint16 v);
+IMPORT_C TBool __e32_atomic_cas_rel16(volatile TAny* a, TUint16* q, TUint16 v);
+IMPORT_C TBool __e32_atomic_cas_ord16(volatile TAny* a, TUint16* q, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_add_rlx16(volatile TAny* a, TUint16 v); /* *a += v; return original *a; */
+IMPORT_C TUint16 __e32_atomic_add_acq16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_add_rel16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_add_ord16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_and_rlx16(volatile TAny* a, TUint16 v); /* *a &= v; return original *a; */
+IMPORT_C TUint16 __e32_atomic_and_acq16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_and_rel16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_and_ord16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_ior_rlx16(volatile TAny* a, TUint16 v); /* *a |= v; return original *a; */
+IMPORT_C TUint16 __e32_atomic_ior_acq16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_ior_rel16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_ior_ord16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_xor_rlx16(volatile TAny* a, TUint16 v); /* *a ^= v; return original *a; */
+IMPORT_C TUint16 __e32_atomic_xor_acq16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_xor_rel16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_xor_ord16(volatile TAny* a, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_axo_rlx16(volatile TAny* a, TUint16 u, TUint16 v); /* *a = (*a & u) ^ v; return original *a; */
+IMPORT_C TUint16 __e32_atomic_axo_acq16(volatile TAny* a, TUint16 u, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_axo_rel16(volatile TAny* a, TUint16 u, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_axo_ord16(volatile TAny* a, TUint16 u, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_tau_rlx16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
+IMPORT_C TUint16 __e32_atomic_tau_acq16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_tau_rel16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
+IMPORT_C TUint16 __e32_atomic_tau_ord16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v);
+IMPORT_C TInt16 __e32_atomic_tas_rlx16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; (signed comparison) */
+IMPORT_C TInt16 __e32_atomic_tas_acq16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
+IMPORT_C TInt16 __e32_atomic_tas_rel16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
+IMPORT_C TInt16 __e32_atomic_tas_ord16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v);
+
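+/* Example (illustrative sketch): axo computes *a = (*a & u) ^ v in one
+   atomic step. With u = ~mask and v = newBits (newBits lying within mask)
+   it rewrites a single bitfield without disturbing the other bits.
+   KStateMask and iFlags are placeholders, not part of this API.
+
+	const TUint16 KStateMask = 0x00F0;
+	TUint16 newBits = 0x0030;                  // must lie within KStateMask
+	__e32_atomic_axo_ord16(&iFlags, (TUint16)~KStateMask, newBits);
+*/
+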
+/* Atomic operations on 32 bit quantities */
+IMPORT_C TUint32 __e32_atomic_load_acq32(const volatile TAny* a); /* read 32 bit acquire semantics */
+IMPORT_C TUint32 __e32_atomic_store_rel32(volatile TAny* a, TUint32 v); /* write 32 bit, return v, release semantics */
+IMPORT_C TUint32 __e32_atomic_store_ord32(volatile TAny* a, TUint32 v); /* write 32 bit, return v, full fence */
+IMPORT_C TUint32 __e32_atomic_swp_rlx32(volatile TAny* a, TUint32 v); /* write 32 bit, return original, relaxed */
+IMPORT_C TUint32 __e32_atomic_swp_acq32(volatile TAny* a, TUint32 v); /* write 32 bit, return original, acquire */
+IMPORT_C TUint32 __e32_atomic_swp_rel32(volatile TAny* a, TUint32 v); /* write 32 bit, return original, release */
+IMPORT_C TUint32 __e32_atomic_swp_ord32(volatile TAny* a, TUint32 v); /* write 32 bit, return original, full fence */
+IMPORT_C TBool __e32_atomic_cas_rlx32(volatile TAny* a, TUint32* q, TUint32 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
+IMPORT_C TBool __e32_atomic_cas_acq32(volatile TAny* a, TUint32* q, TUint32 v);
+IMPORT_C TBool __e32_atomic_cas_rel32(volatile TAny* a, TUint32* q, TUint32 v);
+IMPORT_C TBool __e32_atomic_cas_ord32(volatile TAny* a, TUint32* q, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_add_rlx32(volatile TAny* a, TUint32 v); /* *a += v; return original *a; */
+IMPORT_C TUint32 __e32_atomic_add_acq32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_add_rel32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_add_ord32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_and_rlx32(volatile TAny* a, TUint32 v); /* *a &= v; return original *a; */
+IMPORT_C TUint32 __e32_atomic_and_acq32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_and_rel32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_and_ord32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_ior_rlx32(volatile TAny* a, TUint32 v); /* *a |= v; return original *a; */
+IMPORT_C TUint32 __e32_atomic_ior_acq32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_ior_rel32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_ior_ord32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_xor_rlx32(volatile TAny* a, TUint32 v); /* *a ^= v; return original *a; */
+IMPORT_C TUint32 __e32_atomic_xor_acq32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_xor_rel32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_xor_ord32(volatile TAny* a, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_axo_rlx32(volatile TAny* a, TUint32 u, TUint32 v); /* *a = (*a & u) ^ v; return original *a; */
+IMPORT_C TUint32 __e32_atomic_axo_acq32(volatile TAny* a, TUint32 u, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_axo_rel32(volatile TAny* a, TUint32 u, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_axo_ord32(volatile TAny* a, TUint32 u, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_tau_rlx32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
+IMPORT_C TUint32 __e32_atomic_tau_acq32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_tau_rel32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
+IMPORT_C TUint32 __e32_atomic_tau_ord32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v);
+IMPORT_C TInt32 __e32_atomic_tas_rlx32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; (signed comparison) */
+IMPORT_C TInt32 __e32_atomic_tas_acq32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
+IMPORT_C TInt32 __e32_atomic_tas_rel32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
+IMPORT_C TInt32 __e32_atomic_tas_ord32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v);
+
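+/* Example (illustrative sketch): on failure the cas functions copy the
+   observed value back into *q, so a read-modify-write loop needs no
+   separate reload. A minimal atomic-maximum helper (hypothetical name):
+
+	void AtomicMax32(volatile TUint32* aDest, TUint32 aVal)
+		{
+		TUint32 old = __e32_atomic_load_acq32(aDest);
+		while (old < aVal && !__e32_atomic_cas_ord32(aDest, &old, aVal))
+			{}                                 // old now holds current value
+		}
+*/
+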
+/* Atomic operations on 64 bit quantities */
+IMPORT_C TUint64 __e32_atomic_load_acq64(const volatile TAny* a); /* read 64 bit acquire semantics */
+IMPORT_C TUint64 __e32_atomic_store_rel64(volatile TAny* a, TUint64 v); /* write 64 bit, return v, release semantics */
+IMPORT_C TUint64 __e32_atomic_store_ord64(volatile TAny* a, TUint64 v); /* write 64 bit, return v, full fence */
+IMPORT_C TUint64 __e32_atomic_swp_rlx64(volatile TAny* a, TUint64 v); /* write 64 bit, return original, relaxed */
+IMPORT_C TUint64 __e32_atomic_swp_acq64(volatile TAny* a, TUint64 v); /* write 64 bit, return original, acquire */
+IMPORT_C TUint64 __e32_atomic_swp_rel64(volatile TAny* a, TUint64 v); /* write 64 bit, return original, release */
+IMPORT_C TUint64 __e32_atomic_swp_ord64(volatile TAny* a, TUint64 v); /* write 64 bit, return original, full fence */
+IMPORT_C TBool __e32_atomic_cas_rlx64(volatile TAny* a, TUint64* q, TUint64 v); /* if (*a==*q) {*a=v; return TRUE;} else {*q=*a; return FALSE;} */
+IMPORT_C TBool __e32_atomic_cas_acq64(volatile TAny* a, TUint64* q, TUint64 v);
+IMPORT_C TBool __e32_atomic_cas_rel64(volatile TAny* a, TUint64* q, TUint64 v);
+IMPORT_C TBool __e32_atomic_cas_ord64(volatile TAny* a, TUint64* q, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_add_rlx64(volatile TAny* a, TUint64 v); /* *a += v; return original *a; */
+IMPORT_C TUint64 __e32_atomic_add_acq64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_add_rel64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_add_ord64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_and_rlx64(volatile TAny* a, TUint64 v); /* *a &= v; return original *a; */
+IMPORT_C TUint64 __e32_atomic_and_acq64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_and_rel64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_and_ord64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_ior_rlx64(volatile TAny* a, TUint64 v); /* *a |= v; return original *a; */
+IMPORT_C TUint64 __e32_atomic_ior_acq64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_ior_rel64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_ior_ord64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_xor_rlx64(volatile TAny* a, TUint64 v); /* *a ^= v; return original *a; */
+IMPORT_C TUint64 __e32_atomic_xor_acq64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_xor_rel64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_xor_ord64(volatile TAny* a, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_axo_rlx64(volatile TAny* a, TUint64 u, TUint64 v); /* *a = (*a & u) ^ v; return original *a; */
+IMPORT_C TUint64 __e32_atomic_axo_acq64(volatile TAny* a, TUint64 u, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_axo_rel64(volatile TAny* a, TUint64 u, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_axo_ord64(volatile TAny* a, TUint64 u, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_tau_rlx64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; */
+IMPORT_C TUint64 __e32_atomic_tau_acq64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_tau_rel64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
+IMPORT_C TUint64 __e32_atomic_tau_ord64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v);
+IMPORT_C TInt64 __e32_atomic_tas_rlx64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v); /* if (*a>=t) *a+=u else *a+=v; return original *a; (signed comparison) */
+IMPORT_C TInt64 __e32_atomic_tas_acq64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
+IMPORT_C TInt64 __e32_atomic_tas_rel64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
+IMPORT_C TInt64 __e32_atomic_tas_ord64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v);
+
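+/* Example (illustrative sketch): on 32 bit targets a plain 64 bit read can
+   tear between the two word accesses; load_acq64 performs it as a single
+   atomic operation by whatever means the platform provides (LDREXD,
+   CMPXCHG8B, or with interrupts disabled - see the note at the top of this
+   file). iTicks64 is a placeholder shared counter.
+
+	TUint64 now = __e32_atomic_load_acq64(&iTicks64);
+*/
+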
+/* Atomic operations on pointers
+	These are implemented as macro definitions over the 32 or 64 bit operations,
+	according to the pointer size (32 bit on all currently supported targets).
+*/
+/* IMPORT_C TAny* __e32_atomic_load_acq_ptr(const volatile TAny* a); */
+#define __e32_atomic_load_acq_ptr(a) ((TAny*)__e32_atomic_load_acq32(a))
+/* IMPORT_C TAny* __e32_atomic_store_rel_ptr(volatile TAny* a, const volatile TAny* v); */
+#define __e32_atomic_store_rel_ptr(a,v) ((TAny*)__e32_atomic_store_rel32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_store_ord_ptr(volatile TAny* a, const volatile TAny* v); */
+#define __e32_atomic_store_ord_ptr(a,v) ((TAny*)__e32_atomic_store_ord32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_swp_rlx_ptr(volatile TAny* a, const volatile TAny* v); */
+#define __e32_atomic_swp_rlx_ptr(a,v) ((TAny*)__e32_atomic_swp_rlx32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_swp_acq_ptr(volatile TAny* a, const volatile TAny* v); */
+#define __e32_atomic_swp_acq_ptr(a,v) ((TAny*)__e32_atomic_swp_acq32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_swp_rel_ptr(volatile TAny* a, const volatile TAny* v); */
+#define __e32_atomic_swp_rel_ptr(a,v) ((TAny*)__e32_atomic_swp_rel32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_swp_ord_ptr(volatile TAny* a, const volatile TAny* v); */
+#define __e32_atomic_swp_ord_ptr(a,v) ((TAny*)__e32_atomic_swp_ord32(a,(T_UintPtr)(v)))
+/* IMPORT_C TBool __e32_atomic_cas_rlx_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
+#define __e32_atomic_cas_rlx_ptr(a,q,v) (__e32_atomic_cas_rlx32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
+/* IMPORT_C TBool __e32_atomic_cas_acq_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
+#define __e32_atomic_cas_acq_ptr(a,q,v) (__e32_atomic_cas_acq32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
+/* IMPORT_C TBool __e32_atomic_cas_rel_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
+#define __e32_atomic_cas_rel_ptr(a,q,v) (__e32_atomic_cas_rel32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
+/* IMPORT_C TBool __e32_atomic_cas_ord_ptr(volatile TAny* a, const volatile TAny** q, const volatile TAny* v); */
+#define __e32_atomic_cas_ord_ptr(a,q,v) (__e32_atomic_cas_ord32(a,(T_UintPtr*)(q),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_add_rlx_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_add_rlx_ptr(a,v) ((TAny*)__e32_atomic_add_rlx32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_add_acq_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_add_acq_ptr(a,v) ((TAny*)__e32_atomic_add_acq32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_add_rel_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_add_rel_ptr(a,v) ((TAny*)__e32_atomic_add_rel32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_add_ord_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_add_ord_ptr(a,v) ((TAny*)__e32_atomic_add_ord32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_and_rlx_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_and_rlx_ptr(a,v) ((TAny*)__e32_atomic_and_rlx32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_and_acq_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_and_acq_ptr(a,v) ((TAny*)__e32_atomic_and_acq32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_and_rel_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_and_rel_ptr(a,v) ((TAny*)__e32_atomic_and_rel32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_and_ord_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_and_ord_ptr(a,v) ((TAny*)__e32_atomic_and_ord32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_ior_rlx_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_ior_rlx_ptr(a,v) ((TAny*)__e32_atomic_ior_rlx32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_ior_acq_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_ior_acq_ptr(a,v) ((TAny*)__e32_atomic_ior_acq32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_ior_rel_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_ior_rel_ptr(a,v) ((TAny*)__e32_atomic_ior_rel32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_ior_ord_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_ior_ord_ptr(a,v) ((TAny*)__e32_atomic_ior_ord32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_xor_rlx_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_xor_rlx_ptr(a,v) ((TAny*)__e32_atomic_xor_rlx32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_xor_acq_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_xor_acq_ptr(a,v) ((TAny*)__e32_atomic_xor_acq32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_xor_rel_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_xor_rel_ptr(a,v) ((TAny*)__e32_atomic_xor_rel32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_xor_ord_ptr(volatile TAny* a, T_UintPtr v); */
+#define __e32_atomic_xor_ord_ptr(a,v) ((TAny*)__e32_atomic_xor_ord32(a,(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_axo_rlx_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_axo_rlx_ptr(a,u,v) ((TAny*)__e32_atomic_axo_rlx32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_axo_acq_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_axo_acq_ptr(a,u,v) ((TAny*)__e32_atomic_axo_acq32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_axo_rel_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_axo_rel_ptr(a,u,v) ((TAny*)__e32_atomic_axo_rel32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_axo_ord_ptr(volatile TAny* a, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_axo_ord_ptr(a,u,v) ((TAny*)__e32_atomic_axo_ord32(a,(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_tau_rlx_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_tau_rlx_ptr(a,t,u,v) ((TAny*)__e32_atomic_tau_rlx32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_tau_acq_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_tau_acq_ptr(a,t,u,v) ((TAny*)__e32_atomic_tau_acq32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_tau_rel_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_tau_rel_ptr(a,t,u,v) ((TAny*)__e32_atomic_tau_rel32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
+/* IMPORT_C TAny* __e32_atomic_tau_ord_ptr(volatile TAny* a, const volatile TAny* t, T_UintPtr u, T_UintPtr v); */
+#define __e32_atomic_tau_ord_ptr(a,t,u,v) ((TAny*)__e32_atomic_tau_ord32(a,(T_UintPtr)(t),(T_UintPtr)(u),(T_UintPtr)(v)))
+
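+/* Example (illustrative sketch): a minimal Treiber-stack push built on the
+   pointer cas macro; release ordering makes the node contents visible before
+   the new head is published. TNode and iHead are placeholders.
+
+	struct TNode { TNode* iNext; };
+	TNode* volatile iHead;
+
+	void Push(TNode* aNode)
+		{
+		TNode* old = (TNode*)__e32_atomic_load_acq_ptr(&iHead);
+		do	{
+			aNode->iNext = old;
+			} while (!__e32_atomic_cas_rel_ptr(&iHead, (TAny**)&old, aNode));
+		}
+*/
+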
+/* Miscellaneous utility functions
+*/
+IMPORT_C TInt __e32_find_ms1_32(TUint32 v); /* return bit number of most significant 1, -1 if argument zero */
+IMPORT_C TInt __e32_find_ls1_32(TUint32 v); /* return bit number of least significant 1, -1 if argument zero */
+IMPORT_C TInt __e32_bit_count_32(TUint32 v); /* return number of bits with value 1 */
+IMPORT_C TInt __e32_find_ms1_64(TUint64 v); /* return bit number of most significant 1, -1 if argument zero */
+IMPORT_C TInt __e32_find_ls1_64(TUint64 v); /* return bit number of least significant 1, -1 if argument zero */
+IMPORT_C TInt __e32_bit_count_64(TUint64 v); /* return number of bits with value 1 */
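+
+/* Example (illustrative sketch): __e32_find_ms1_32 is floor(log2(v)) for
+   non-zero v, which makes rounding up to a power of two a two-liner.
+   Hypothetical helper; assumes 1 <= v <= 0x80000000 so the shift cannot
+   overflow.
+
+	TUint32 RoundUpToPowerOf2(TUint32 v)
+		{
+		TInt msb = __e32_find_ms1_32(v);
+		return (v & (v - 1)) ? (1u << (msb + 1)) : v;   // already a power of 2?
+		}
+*/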
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif /* __E32ATOMICS_H__ */