// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\atomic_skeleton.h
//
//

/**
Read an 8/16/32 bit quantity with acquire semantics

@param a Address of data to be read - must be naturally aligned
@return The value read
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_load_acq)(const volatile TAny* /*a*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
#ifdef __BARRIERS_NEEDED__
	asm("lock add dword ptr [esp], 0 ");
#endif
	asm("ret ");
	}


/** Write an 8/16/32 bit quantity with release semantics

@param a Address of data to be written - must be naturally aligned
@param v The value to be written
@return The value written
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_store_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("mov " __A_REG__ ", " __D_REG__ );
	asm(__LOCK__ "xchg [ecx], " __D_REG__ );
	asm("ret ");
	}


/** Write an 8/16/32 bit quantity with full barrier semantics

@param a Address of data to be written - must be naturally aligned
@param v The value to be written
@return The value written
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_store_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_store_rel);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Relaxed ordering.

@param a Address of data to be written - must be naturally aligned
@param v The value to be written
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_swp_ord);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Acquire semantics.

@param a Address of data to be written - must be naturally aligned
@param v The value to be written
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_swp_ord);
	}


/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Release semantics.

@param a Address of data to be written - must be naturally aligned
@param v The value to be written
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_swp_ord);
	}
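/*
Usage sketch (illustrative only, not part of the skeleton). This header is a
width-generic template: the including file is expected to define __TUintX__,
__TIntX__, __fname__, __A_REG__, __D_REG__, __LOCK__ and __redir__ before
including it, once per operand size. Assuming the 32-bit instantiation, where
__fname__ is taken to append a "32" suffix (an assumption about the including
file, not something this skeleton guarantees), the load/store/swap entry
points above would be reached roughly like this:

	#include <e32atomics.h>

	volatile TUint32 Flag = 0;

	TUint32 v   = __e32_atomic_load_acq32(&Flag);      // read with acquire ordering
	__e32_atomic_store_rel32(&Flag, 1);                // publish with release ordering
	TUint32 old = __e32_atomic_swp_ord32(&Flag, 0);    // exchange, full barrier
*/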
/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Full barrier semantics.

@param a Address of data to be written - must be naturally aligned
@param v The value to be written
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [esp+8] ");
	asm(__LOCK__ "xchg [ecx], " __A_REG__ );
	asm("ret ");
	}


/** 8/16/32 bit compare and swap, relaxed ordering.

Atomically performs the following operation:
	if (*a == *q)	{ *a = v; return TRUE; }
	else			{ *q = *a; return FALSE; }

@param a Address of data to be written - must be naturally aligned
@param q Address of location containing expected value
@param v The new value to be written if the old value is as expected
@return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_rlx)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_cas_ord);
	}


/** 8/16/32 bit compare and swap, acquire semantics.

Atomically performs the following operation:
	if (*a == *q)	{ *a = v; return TRUE; }
	else			{ *q = *a; return FALSE; }

@param a Address of data to be written - must be naturally aligned
@param q Address of location containing expected value
@param v The new value to be written if the old value is as expected
@return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_acq)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_cas_ord);
	}


/** 8/16/32 bit compare and swap, release semantics.

Atomically performs the following operation:
	if (*a == *q)	{ *a = v; return TRUE; }
	else			{ *q = *a; return FALSE; }

@param a Address of data to be written - must be naturally aligned
@param q Address of location containing expected value
@param v The new value to be written if the old value is as expected
@return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_rel)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_cas_ord);
	}


/** 8/16/32 bit compare and swap, full barrier semantics.

Atomically performs the following operation:
	if (*a == *q)	{ *a = v; return TRUE; }
	else			{ *q = *a; return FALSE; }

@param a Address of data to be written - must be naturally aligned
@param q Address of location containing expected value
@param v The new value to be written if the old value is as expected
@return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_ord)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov eax, [esp+8] ");
	asm("mov " __D_REG__ ", [esp+12] ");
	asm("mov " __A_REG__ ", [eax] ");
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 2f ");
	asm("mov eax, 1 ");
	asm("ret ");
	asm("2: ");
	asm("mov edx, [esp+8] ");
	asm("mov [edx], " __A_REG__ );
	asm("xor eax, eax ");
	asm("ret ");
	}
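/*
Illustrative sketch (not part of the skeleton): compare and swap is the usual
building block for read-modify-write updates that have no dedicated primitive.
Note that on failure it writes the value actually found back through q, so a
retry loop does not need a separate reload. Assuming the 32-bit instantiation
(the "32" suffix is an assumption about how __fname__ expands), an atomic
maximum could look like:

	#include <e32atomics.h>

	void AtomicMax32(volatile TUint32* aAddr, TUint32 aCandidate)
		{
		TUint32 old = __e32_atomic_load_acq32(aAddr);
		// Retry until the stored value is already >= aCandidate or the CAS
		// succeeds; a failed CAS refreshes 'old' with the current contents.
		while (old < aCandidate && !__e32_atomic_cas_ord32(aAddr, &old, aCandidate))
			{
			}
		}
*/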
/** 8/16/32 bit atomic add, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; *a = oldv + v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be added
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_add_ord);
	}


/** 8/16/32 bit atomic add, acquire semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv + v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be added
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_add_ord);
	}


/** 8/16/32 bit atomic add, release semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv + v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be added
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_add_ord);
	}


/** 8/16/32 bit atomic add, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv + v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be added
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [esp+8] ");
	asm(__LOCK__ "xadd [ecx], " __A_REG__ );
	asm("ret ");
	}


/** 8/16/32 bit atomic bitwise logical AND, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; *a = oldv & v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ANDed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_and_ord);
	}


/** 8/16/32 bit atomic bitwise logical AND, acquire semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv & v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ANDed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_and_ord);
	}
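/*
Illustrative sketch (not part of the skeleton): the add primitive is a
fetch-and-add, i.e. it returns the value seen before the addition. Assuming
the 32-bit instantiation, a shared event counter might look like:

	#include <e32atomics.h>

	volatile TUint32 EventCount = 0;

	TUint32 NextEventId()
		{
		// Returns the pre-increment value; add 1 for the post-increment view.
		return __e32_atomic_add_ord32(&EventCount, 1);
		}
*/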
/** 8/16/32 bit atomic bitwise logical AND, release semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv & v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ANDed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_and_ord);
	}


/** 8/16/32 bit atomic bitwise logical AND, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv & v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ANDed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("and " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; *a = oldv | v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_ior_ord);
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, acquire semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv | v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_ior_ord);
	}


/** 8/16/32 bit atomic bitwise logical inclusive OR, release semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv | v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_ior_ord);
	}
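/*
Illustrative sketch (not part of the skeleton): because the AND and OR
primitives return the previous value, they can update a flag word and report
its old state in one step. Assuming the 32-bit instantiation:

	#include <e32atomics.h>

	const TUint32 KFlagBusy = 0x01;

	volatile TUint32 Flags = 0;

	// Set the busy flag; returns true only if this call was the one to set it.
	TBool ClaimBusy()
		{
		return !(__e32_atomic_ior_ord32(&Flags, KFlagBusy) & KFlagBusy);
		}

	// Clear the busy flag (a lock-style hand-off would use the _rel variant).
	void ReleaseBusy()
		{
		__e32_atomic_and_ord32(&Flags, ~KFlagBusy);
		}
*/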
/** 8/16/32 bit atomic bitwise logical inclusive OR, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv | v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be ORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("or " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; *a = oldv ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be XORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_xor_ord);
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, acquire semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be XORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_xor_ord);
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, release semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be XORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_xor_ord);
	}


/** 8/16/32 bit atomic bitwise logical exclusive OR, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; *a = oldv ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param v The value to be XORed with *a
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("xor " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}
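/*
Illustrative sketch (not part of the skeleton): XOR toggles bits and, like the
other read-modify-write primitives, returns the previous value. Assuming the
32-bit instantiation, toggling a state bit while learning its old state:

	#include <e32atomics.h>

	const TUint32 KStateBit = 0x80000000u;

	volatile TUint32 State = 0;

	TBool ToggleAndTestOld()
		{
		return (__e32_atomic_xor_ord32(&State, KStateBit) & KStateBit) != 0;
		}
*/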
/** 8/16/32 bit atomic bitwise universal function, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; *a = (oldv & u) ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param u The value to be ANDed with *a
@param v The value to be XORed with (*a&u)
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_rlx)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_axo_ord);
	}


/** 8/16/32 bit atomic bitwise universal function, acquire semantics.

Atomically performs the following operation:
	oldv = *a; *a = (oldv & u) ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param u The value to be ANDed with *a
@param v The value to be XORed with (*a&u)
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_acq)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_axo_ord);
	}


/** 8/16/32 bit atomic bitwise universal function, release semantics.

Atomically performs the following operation:
	oldv = *a; *a = (oldv & u) ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param u The value to be ANDed with *a
@param v The value to be XORed with (*a&u)
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_rel)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_axo_ord);
	}


/** 8/16/32 bit atomic bitwise universal function, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; *a = (oldv & u) ^ v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param u The value to be ANDed with *a
@param v The value to be XORed with (*a&u)
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_ord)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+8] ");
	asm("and " __D_REG__ ", " __A_REG__ );
	asm("xor " __D_REG__ ", [esp+12] ");
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}


/** 8/16/32 bit threshold and add, unsigned, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (unsigned compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_rlx)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_tau_ord);
	}
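/*
Illustrative sketch (not part of the skeleton): the "universal" bitwise
function computes (*a & u) ^ v, which yields AND (u = mask, v = 0),
XOR (u = all ones, v = mask) and OR (u = ~mask, v = mask) as special cases.
Assuming the 32-bit instantiation, replacing a 4-bit field in one atomic step:

	#include <e32atomics.h>

	const TUint32 KFieldMask = 0x000000F0u;

	volatile TUint32 Control = 0;

	// Clears the bits selected by KFieldMask and inserts aField (which must
	// lie within the mask); returns the previous value of the whole word.
	TUint32 SetField(TUint32 aField)
		{
		return __e32_atomic_axo_ord32(&Control, ~KFieldMask, aField & KFieldMask);
		}
*/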
/** 8/16/32 bit threshold and add, unsigned, acquire semantics.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (unsigned compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_acq)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_tau_ord);
	}


/** 8/16/32 bit threshold and add, unsigned, release semantics.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (unsigned compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_rel)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	__redir__(__e32_atomic_tau_ord);
	}


/** 8/16/32 bit threshold and add, unsigned, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (unsigned compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_ord)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+12] ");
	asm("cmp " __A_REG__ ", [esp+8] ");
	asm("jae short 2f ");
	asm("mov " __D_REG__ ", [esp+16] ");
	asm("2: ");
	asm("add " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}


/** 8/16/32 bit threshold and add, signed, relaxed ordering.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (signed compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_rlx)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	__redir__(__e32_atomic_tas_ord);
	}
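/*
Illustrative sketch (not part of the skeleton): "threshold and add" picks one
of two addends depending on how the current value compares with a threshold,
which makes bounded counters a single call. Assuming the 32-bit instantiation,
an increment that never pushes the count above KLimit:

	#include <e32atomics.h>

	const TUint32 KLimit = 100;

	volatile TUint32 Count = 0;

	// Adds 1 while Count < KLimit and 0 once the limit is reached; returns
	// the value observed before the update.
	TUint32 BoundedInc()
		{
		return __e32_atomic_tau_ord32(&Count, KLimit, 0, 1);
		}
*/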
/** 8/16/32 bit threshold and add, signed, acquire semantics.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (signed compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_acq)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	__redir__(__e32_atomic_tas_ord);
	}


/** 8/16/32 bit threshold and add, signed, release semantics.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (signed compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_rel)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	__redir__(__e32_atomic_tas_ord);
	}


/** 8/16/32 bit threshold and add, signed, full barrier semantics.

Atomically performs the following operation:
	oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

@param a Address of data to be updated - must be naturally aligned
@param t The threshold to compare *a to (signed compare)
@param u The value to be added to *a if it is originally >= t
@param v The value to be added to *a if it is originally < t
@return The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_ord)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	asm("mov ecx, [esp+4] ");
	asm("mov " __A_REG__ ", [ecx] ");
	asm("1: ");
	asm("mov " __D_REG__ ", [esp+12] ");
	asm("cmp " __A_REG__ ", [esp+8] ");
	asm("jge short 2f ");
	asm("mov " __D_REG__ ", [esp+16] ");
	asm("2: ");
	asm("add " __D_REG__ ", " __A_REG__ );
	asm(__LOCK__ "cmpxchg [ecx], " __D_REG__ );
	asm("jne short 1b ");
	asm("ret ");
	}
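/*
Illustrative sketch (not part of the skeleton): the signed variant suits
counts whose adjustment depends on the sign of the current value. Assuming
the 32-bit instantiation, a non-blocking "take a token if one is available":

	#include <e32atomics.h>

	volatile TInt32 Tokens = 4;

	// Decrements Tokens only if it is >= 1; returns true if a token was taken.
	TBool TryTakeToken()
		{
		return __e32_atomic_tas_ord32(&Tokens, 1, -1, 0) >= 1;
		}
*/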