First public contribution.
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\common\x86\atomics.cpp

#include <e32atomics.h>
/*
WINS/WINSCW     Use X86 locked operations. Assume Pentium or above CPU (CMPXCHG8B available)
X86             For Pentium and above use locked operations
                For 486 use locked operations for 8, 16, 32 bit. For 64 bit must disable interrupts.
                NOTE: 486 not supported at the moment
ARMv4/ARMv5     Must disable interrupts.
ARMv6           LDREX/STREX for 8, 16, 32 bit. For 64 bit must disable interrupts (maybe).
ARMv6K/ARMv7    LDREXB/LDREXH/LDREX/LDREXD

Need both kernel side and user side versions
*/
#if defined(__SMP__) || !defined(__EPOC32__)
#define __BARRIERS_NEEDED__
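
/*
A minimal sketch of how the __LOCK__ macro used in the assembler below is
conventionally defined, assuming it simply expands to the x86 LOCK prefix when
barriers are required and to nothing on uniprocessor builds:

    #ifdef __BARRIERS_NEEDED__
    #define __LOCK__ lock       // LOCK prefix: make read-modify-write ops atomic across CPUs
    #else
    #define __LOCK__            // single CPU: the bus lock is unnecessary
    #endif
*/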
#define __TUintX__ TUint32
#define __TIntX__ TInt32
#define __fname__(x) x##32
#include "atomic_skeleton.h"

#define __TUintX__ TUint16
#define __TIntX__ TInt16
#define __fname__(x) x##16
#include "atomic_skeleton.h"

#define __TUintX__ TUint8
#define __TIntX__ TInt8
#define __fname__(x) x##8
#include "atomic_skeleton.h"
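
/*
Each inclusion of atomic_skeleton.h is parameterised by the macros defined just
above: __TUintX__/__TIntX__ select the operand width and __fname__ pastes that
width onto the generated function names. An illustrative sketch of the idea
(assumed, not the actual skeleton contents):

    // a declaration like this inside the skeleton header...
    EXPORT_C __TUintX__ __fname__(__e32_atomic_add_ord)(volatile TAny* a, __TUintX__ v);
    // ...expands, with __fname__(x) x##32 and __TUintX__ TUint32, to
    // EXPORT_C TUint32 __e32_atomic_add_ord32(volatile TAny* a, TUint32 v);
*/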
/** Full memory barrier for explicit memory accesses
*/
EXPORT_C __NAKED__ void __e32_memory_barrier()
#ifdef __BARRIERS_NEEDED__
    _asm lock add dword ptr [esp], 0
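
/*
A LOCKed read-modify-write of a harmless location (adding 0 to the word at the top
of the stack) acts as a full fence on x86 without needing SSE2's MFENCE instruction.
A hedged usage sketch, assuming a hypothetical producer that publishes data through
a flag:

    extern TUint32 Payload;
    extern volatile TUint32 DataReady;

    void Publish(TUint32 aValue)
        {
        Payload = aValue;           // ordinary store
        __e32_memory_barrier();     // order the payload store before the flag store
        DataReady = 1;              // consumer polls this flag
        }
*/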
/** Barrier guaranteeing completion as well as ordering
*/
EXPORT_C __NAKED__ void __e32_io_completion_barrier()
/** Find the most significant 1 in a 32 bit word

    @param v The word to be scanned
    @return The bit number of the most significant 1 if v != 0, -1 if v == 0
*/
EXPORT_C __NAKED__ TInt __e32_find_ms1_32(TUint32 /*v*/)
    _asm bsr eax, [esp+4]
    _asm mov eax, 0ffffffffh
/** Find the least significant 1 in a 32 bit word

    @param v The word to be scanned
    @return The bit number of the least significant 1 if v != 0, -1 if v == 0
*/
EXPORT_C __NAKED__ TInt __e32_find_ls1_32(TUint32 /*v*/)
    _asm bsf eax, [esp+4]
    _asm mov eax, 0ffffffffh
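
/*
BSR yields the index of the highest set bit and BSF the lowest; for a zero input the
CPU leaves the result undefined, which is why both routines fall back to returning -1
(0ffffffffh). A small worked example, assuming only the two functions above:

    TInt hi   = __e32_find_ms1_32(0x00008001u);     // == 15, bit 15 is the highest 1
    TInt lo   = __e32_find_ls1_32(0x00008001u);     // == 0, bit 0 is the lowest 1
    TInt none = __e32_find_ms1_32(0);               // == -1, no bits set
*/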
/** Count the number of 1's in a 32 bit word

    @param v The word to be scanned
    @return The number of 1's
*/
EXPORT_C __NAKED__ TInt __e32_bit_count_32(TUint32 /*v*/)
    _asm mov eax, [esp+4]
    _asm and eax, 0aaaaaaaah
    _asm and edx, 055555555h
    _asm and eax, 0cccccccch
    _asm and edx, 033333333h
    _asm and eax, 00f0f0f0fh
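
/*
The assembler above is the classic SWAR population count: adjacent bits are summed in
pairs, then nibbles, then bytes. A hedged C equivalent of the same folding (a sketch,
not a drop-in replacement for the routine above):

    TInt BitCount32(TUint32 v)
        {
        v = (v & 0x55555555u) + ((v >> 1) & 0x55555555u);   // 16 groups of 2 bits
        v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);   // 8 groups of 4 bits
        v = (v + (v >> 4)) & 0x0f0f0f0fu;                   // 4 groups of 8 bits
        return (TInt)((v * 0x01010101u) >> 24);             // sum of the four byte counts
        }

The 64 bit version below applies the same folding to each 32 bit half of the operand
and then merges the two partial counts.
*/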
/** Find the most significant 1 in a 64 bit word

    @param v The word to be scanned
    @return The bit number of the most significant 1 if v != 0, -1 if v == 0
*/
EXPORT_C __NAKED__ TInt __e32_find_ms1_64(TUint64 /*v*/)
    _asm bsr eax, [esp+8]
    _asm bsr eax, [esp+4]
    _asm mov eax, 0ffffffffh
/** Find the least significant 1 in a 64 bit word

    @param v The word to be scanned
    @return The bit number of the least significant 1 if v != 0, -1 if v == 0
*/
EXPORT_C __NAKED__ TInt __e32_find_ls1_64(TUint64 /*v*/)
    _asm bsf eax, [esp+4]
    _asm bsf eax, [esp+8]
    _asm mov eax, 0ffffffffh
/** Count the number of 1's in a 64 bit word

    @param v The word to be scanned
    @return The number of 1's
*/
EXPORT_C __NAKED__ TInt __e32_bit_count_64(TUint64 /*v*/)
    _asm mov eax, [esp+4]
    _asm mov edx, [esp+8]
    _asm and eax, 0aaaaaaaah
    _asm and ecx, 055555555h
    _asm add eax, ecx       /* 16 groups of 2 bits, count=0,1,2 */
    _asm and eax, 0cccccccch
    _asm and ecx, 033333333h
    _asm add ecx, eax       /* 8 groups of 4 bits, count=0...4 */
    _asm and eax, 0aaaaaaaah
    _asm and edx, 055555555h
    _asm add eax, edx       /* 16 groups of 2 bits, count=0,1,2 */
    _asm and eax, 0cccccccch
    _asm and edx, 033333333h
    _asm add eax, edx       /* 8 groups of 4 bits, count=0...4 */
    _asm add eax, ecx       /* 8 groups of 4 bits, count=0...8 */
    _asm and eax, 0f0f0f0f0h
    _asm and edx, 00f0f0f0fh
    _asm add eax, edx       /* 4 groups of 8 bits, count=0...16 */
/** Read a 64 bit word with acquire semantics

    @param a Address of word to be read - must be a multiple of 8
    @return The value read
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_load_acq64(const volatile TAny* /*a*/)
    _asm mov edi, [esp+12]
    _asm mov eax, 0badbeefh
    _asm __LOCK__ cmpxchg8b [edi]
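
/*
Plain IA-32 has no ordinary 64 bit load that is guaranteed atomic, so the read is done
with CMPXCHG8B: the comparand and replacement registers are primed with the same
arbitrary value (0badbeefh), so a match harmlessly rewrites the value already present
while a mismatch loads the current contents of *a into EDX:EAX. Either way exactly one
atomic 64 bit access is made. A hedged usage sketch:

    volatile TUint64 SharedStamp;   // updated concurrently by another thread or CPU

    TUint64 ReadStamp()
        {
        // atomic 64 bit snapshot; SharedStamp must be 8 byte aligned
        return __e32_atomic_load_acq64(&SharedStamp);
        }
*/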
/** Write a 64 bit word with release semantics

    @param a Address of word to be written - must be a multiple of 8
    @param v The value to be written
    @return The value written
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_store_rel64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]
    _asm mov ebx, [esp+16]
    _asm mov ecx, [esp+20]
    _asm mov edx, [edi+4]
    _asm __LOCK__ cmpxchg8b [edi]
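
/*
The store is likewise built on CMPXCHG8B: the current contents of *a are read into
EDX:EAX and the exchange is retried until the compare succeeds, which is the only way
to get an atomic 64 bit store on plain IA-32. The loop is equivalent to this hedged
sketch built on the CAS primitive declared further down:

    void Store64(volatile TUint64* a, TUint64 v)
        {
        TUint64 old = *a;
        while (!__e32_atomic_cas_ord64(a, &old, v))
            {
            // old has been refreshed with the current value of *a; try again
            }
        }
*/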
/** Write a 64 bit word with full barrier semantics

    @param a Address of word to be written - must be a multiple of 8
    @param v The value to be written
    @return The value written
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_store_ord64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_store_rel64
/** Write a 64 bit word to memory and return the original value of the memory.
    Relaxed ordering.

    @param a Address of word to be written - must be a multiple of 8
    @param v The value to be written
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_swp_rlx64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_swp_ord64

/** Write a 64 bit word to memory and return the original value of the memory.
    Acquire semantics.

    @param a Address of word to be written - must be a multiple of 8
    @param v The value to be written
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_swp_acq64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_swp_ord64

/** Write a 64 bit word to memory and return the original value of the memory.
    Release semantics.

    @param a Address of word to be written - must be a multiple of 8
    @param v The value to be written
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_swp_rel64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_swp_ord64

/** Write a 64 bit word to memory and return the original value of the memory.
    Full barrier semantics.

    @param a Address of word to be written - must be a multiple of 8
    @param v The value to be written
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_swp_ord64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]
    _asm mov ebx, [esp+16]
    _asm mov ecx, [esp+20]
    _asm mov edx, [edi+4]
    _asm __LOCK__ cmpxchg8b [edi]
/** 64 bit compare and swap, relaxed ordering.

    Atomically performs the following operation:
        if (*a == *q) { *a = v; return TRUE; }
        else { *q = *a; return FALSE; }

    @param a Address of word to be written - must be a multiple of 8
    @param q Address of location containing expected value
    @param v The new value to be written if the old value is as expected
    @return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __e32_atomic_cas_rlx64(volatile TAny* /*a*/, TUint64* /*q*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_cas_ord64

/** 64 bit compare and swap, acquire semantics.

    Atomically performs the following operation:
        if (*a == *q) { *a = v; return TRUE; }
        else { *q = *a; return FALSE; }

    @param a Address of word to be written - must be a multiple of 8
    @param q Address of location containing expected value
    @param v The new value to be written if the old value is as expected
    @return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __e32_atomic_cas_acq64(volatile TAny* /*a*/, TUint64* /*q*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_cas_ord64

/** 64 bit compare and swap, release semantics.

    Atomically performs the following operation:
        if (*a == *q) { *a = v; return TRUE; }
        else { *q = *a; return FALSE; }

    @param a Address of word to be written - must be a multiple of 8
    @param q Address of location containing expected value
    @param v The new value to be written if the old value is as expected
    @return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __e32_atomic_cas_rel64(volatile TAny* /*a*/, TUint64* /*q*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_cas_ord64

/** 64 bit compare and swap, full barrier semantics.

    Atomically performs the following operation:
        if (*a == *q) { *a = v; return TRUE; }
        else { *q = *a; return FALSE; }

    @param a Address of word to be written - must be a multiple of 8
    @param q Address of location containing expected value
    @param v The new value to be written if the old value is as expected
    @return TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __e32_atomic_cas_ord64(volatile TAny* /*a*/, TUint64* /*q*/, TUint64 /*v*/)
    _asm mov edi, [esp+16]          // edi = a
    _asm mov esi, [esp+20]          // esi = q
    _asm mov ebx, [esp+24]          // ecx:ebx = v
    _asm mov ecx, [esp+28]
    _asm mov eax, [esi]             // edx:eax = *q
    _asm mov edx, [esi+4]
    _asm __LOCK__ cmpxchg8b [edi]   // if (*a==*q) *a=v, ZF=1 else edx:eax=*a, ZF=0
    _asm jne short cas_fail
    _asm mov [esi], eax             // *q = edx:eax
    _asm mov [esi+4], edx
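
/*
On failure the observed value is copied back through q, so a caller can loop on the
same local without re-reading *a explicitly. A hedged usage sketch computing an
atomic 64 bit maximum:

    void UpdateMax(volatile TUint64* aMax, TUint64 aSample)
        {
        TUint64 cur = __e32_atomic_load_acq64(aMax);
        while (aSample > cur)
            {
            if (__e32_atomic_cas_ord64(aMax, &cur, aSample))
                break;          // we installed aSample
            // CAS failed: cur now holds the latest *aMax, so the loop re-tests it
            }
        }
*/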
/** 64 bit atomic add, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; *a = oldv + v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be added
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_add_rlx64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_add_ord64

/** 64 bit atomic add, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv + v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be added
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_add_acq64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_add_ord64

/** 64 bit atomic add, release semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv + v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be added
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_add_rel64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_add_ord64

/** 64 bit atomic add, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv + v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be added
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_add_ord64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm add ebx, [esp+16]          // ecx:ebx = oldv + v
    _asm adc ecx, [esp+20]
    _asm __LOCK__ cmpxchg8b [edi]   // if (*a==oldv) *a=oldv+v, ZF=1 else edx:eax=*a, ZF=0
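
/*
A typical use is a 64 bit statistics counter that must not tear on a 32 bit CPU. A
hedged sketch, assuming only the function documented above:

    static volatile TUint64 BytesReceived = 0;

    void AccountPacket(TUint32 aLen)
        {
        // the pre-increment value is returned but not needed here
        __e32_atomic_add_ord64(&BytesReceived, aLen);
        }
*/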
/** 64 bit atomic bitwise logical AND, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; *a = oldv & v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ANDed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_and_rlx64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_and_ord64

/** 64 bit atomic bitwise logical AND, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv & v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ANDed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_and_acq64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_and_ord64

/** 64 bit atomic bitwise logical AND, release semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv & v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ANDed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_and_rel64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_and_ord64

/** 64 bit atomic bitwise logical AND, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv & v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ANDed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_and_ord64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm and ebx, [esp+16]          // ecx:ebx = oldv & v
    _asm and ecx, [esp+20]
    _asm __LOCK__ cmpxchg8b [edi]   // if (*a==oldv) *a=oldv&v, ZF=1 else edx:eax=*a, ZF=0
/** 64 bit atomic bitwise logical inclusive OR, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; *a = oldv | v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_ior_rlx64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_ior_ord64

/** 64 bit atomic bitwise logical inclusive OR, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv | v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_ior_acq64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_ior_ord64

/** 64 bit atomic bitwise logical inclusive OR, release semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv | v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_ior_rel64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_ior_ord64

/** 64 bit atomic bitwise logical inclusive OR, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv | v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be ORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_ior_ord64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm or ebx, [esp+16]           // ecx:ebx = oldv | v
    _asm or ecx, [esp+20]
    _asm __LOCK__ cmpxchg8b [edi]   // if (*a==oldv) *a=oldv|v, ZF=1 else edx:eax=*a, ZF=0
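
/*
The AND and OR forms are the natural way to clear and set bits in a shared 64 bit flag
mask. A hedged sketch using the two primitives together:

    static volatile TUint64 EventMask = 0;

    void SignalEvent(TUint64 aBit)
        {
        __e32_atomic_ior_ord64(&EventMask, aBit);           // set the bit
        }

    TBool TakeEvent(TUint64 aBit)
        {
        // clear the bit and report whether it was previously set
        TUint64 old = __e32_atomic_and_ord64(&EventMask, ~aBit);
        return (old & aBit) != 0;
        }
*/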
/** 64 bit atomic bitwise logical exclusive OR, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; *a = oldv ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be XORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_xor_rlx64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_xor_ord64

/** 64 bit atomic bitwise logical exclusive OR, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be XORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_xor_acq64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_xor_ord64

/** 64 bit atomic bitwise logical exclusive OR, release semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be XORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_xor_rel64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_xor_ord64

/** 64 bit atomic bitwise logical exclusive OR, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; *a = oldv ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param v The value to be XORed with *a
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_xor_ord64(volatile TAny* /*a*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm xor ebx, [esp+16]          // ecx:ebx = oldv ^ v
    _asm xor ecx, [esp+20]
    _asm __LOCK__ cmpxchg8b [edi]   // if (*a==oldv) *a=oldv^v, ZF=1 else edx:eax=*a, ZF=0
/** 64 bit atomic bitwise universal function, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; *a = (oldv & u) ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param u The value to be ANDed with *a
    @param v The value to be XORed with (*a&u)
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_axo_rlx64(volatile TAny* /*a*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_axo_ord64

/** 64 bit atomic bitwise universal function, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; *a = (oldv & u) ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param u The value to be ANDed with *a
    @param v The value to be XORed with (*a&u)
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_axo_acq64(volatile TAny* /*a*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_axo_ord64

/** 64 bit atomic bitwise universal function, release semantics.

    Atomically performs the following operation:
        oldv = *a; *a = (oldv & u) ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param u The value to be ANDed with *a
    @param v The value to be XORed with (*a&u)
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_axo_rel64(volatile TAny* /*a*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_axo_ord64

/** 64 bit atomic bitwise universal function, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; *a = (oldv & u) ^ v; return oldv;

    @param a Address of word to be updated - must be a multiple of 8
    @param u The value to be ANDed with *a
    @param v The value to be XORed with (*a&u)
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_axo_ord64(volatile TAny* /*a*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm and ebx, [esp+16]          // ecx:ebx = oldv & u
    _asm and ecx, [esp+20]
    _asm xor ebx, [esp+24]          // ecx:ebx = (oldv & u) ^ v
    _asm xor ecx, [esp+28]
    _asm __LOCK__ cmpxchg8b [edi]   // if (*a==oldv) *a=(oldv&u)^v, ZF=1 else edx:eax=*a, ZF=0
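
/*
The universal (oldv & u) ^ v form subsumes the simpler operations: u = ~0 gives XOR
with v, v = 0 gives AND with u, u = ~m with v = m gives OR with m, and u = 0 gives a
plain swap to v since (oldv & 0) ^ v == v. A hedged sketch of the last identity:

    TUint64 SwapViaAxo(volatile TAny* a, TUint64 aNew)
        {
        // behaves like __e32_atomic_swp_ord64(a, aNew), returning the old contents
        return __e32_atomic_axo_ord64(a, 0, aNew);
        }
*/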
/** 64 bit threshold and add, unsigned, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (unsigned compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_tau_rlx64(volatile TAny* /*a*/, TUint64 /*t*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_tau_ord64

/** 64 bit threshold and add, unsigned, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (unsigned compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_tau_acq64(volatile TAny* /*a*/, TUint64 /*t*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_tau_ord64

/** 64 bit threshold and add, unsigned, release semantics.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (unsigned compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_tau_rel64(volatile TAny* /*a*/, TUint64 /*t*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm jmp __e32_atomic_tau_ord64

/** 64 bit threshold and add, unsigned, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (unsigned compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TUint64 __e32_atomic_tau_ord64(volatile TAny* /*a*/, TUint64 /*t*/, TUint64 /*u*/, TUint64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm cmp eax, [esp+16]          // eax - t.low, CF=borrow
    _asm sbb ebx, [esp+20]          // CF = borrow from (oldv - t)
    _asm jnc short use_u            // no borrow means oldv>=t so use u
    _asm mov ebx, [esp+32]          // ecx:ebx = v
    _asm mov ecx, [esp+36]
    _asm mov ebx, [esp+24]          // ecx:ebx = u
    _asm mov ecx, [esp+28]
    _asm add ebx, eax               // ecx:ebx = oldv + u or v
    _asm __LOCK__ cmpxchg8b [edi]
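
/*
Threshold-and-add folds a compare and one of two possible additions into a single
atomic step. A hedged usage sketch: a resource counter that saturates at a limit by
adding 0 once the threshold has been reached:

    static volatile TUint64 Used = 0;

    // returns the value of Used before the attempt; the caller knows a unit was
    // taken when the returned value is below aLimit
    TUint64 TryTake(TUint64 aLimit)
        {
        // if Used >= aLimit add 0 (already saturated), otherwise add 1
        return __e32_atomic_tau_ord64(&Used, aLimit, 0, 1);
        }
*/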
/** 64 bit threshold and add, signed, relaxed ordering.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (signed compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TInt64 __e32_atomic_tas_rlx64(volatile TAny* /*a*/, TInt64 /*t*/, TInt64 /*u*/, TInt64 /*v*/)
    _asm jmp __e32_atomic_tas_ord64

/** 64 bit threshold and add, signed, acquire semantics.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (signed compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TInt64 __e32_atomic_tas_acq64(volatile TAny* /*a*/, TInt64 /*t*/, TInt64 /*u*/, TInt64 /*v*/)
    _asm jmp __e32_atomic_tas_ord64

/** 64 bit threshold and add, signed, release semantics.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (signed compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TInt64 __e32_atomic_tas_rel64(volatile TAny* /*a*/, TInt64 /*t*/, TInt64 /*u*/, TInt64 /*v*/)
    _asm jmp __e32_atomic_tas_ord64

/** 64 bit threshold and add, signed, full barrier semantics.

    Atomically performs the following operation:
        oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

    @param a Address of data to be updated - must be naturally aligned
    @param t The threshold to compare *a to (signed compare)
    @param u The value to be added to *a if it is originally >= t
    @param v The value to be added to *a if it is originally < t
    @return The original value of *a
*/
EXPORT_C __NAKED__ TInt64 __e32_atomic_tas_ord64(volatile TAny* /*a*/, TInt64 /*t*/, TInt64 /*u*/, TInt64 /*v*/)
    _asm mov edi, [esp+12]          // edi = a
    _asm mov eax, [edi]             // edx:eax = oldv
    _asm mov edx, [edi+4]
    _asm cmp eax, [esp+16]          // eax - t.low, CF=borrow
    _asm sbb ebx, [esp+20]          // SF=sign, OF=overflow from (oldv - t)
    _asm jge short use_u            // SF==OF (GE condition) means oldv>=t so use u
    _asm mov ebx, [esp+32]          // ecx:ebx = v
    _asm mov ecx, [esp+36]
    _asm jmp short use_v
    _asm mov ebx, [esp+24]          // ecx:ebx = u
    _asm mov ecx, [esp+28]
    _asm add ebx, eax               // ecx:ebx = oldv + u or v
    _asm __LOCK__ cmpxchg8b [edi]
    _asm jne short retry
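
/*
The signed form is convenient when the stored value may legitimately go negative or
the adjustment depends on a signed comparison. A hedged sketch: decrement a count only
while it is positive, as in a try-wait on a counting semaphore:

    static volatile TInt64 Count = 0;

    TBool TryWait()
        {
        // if Count >= 1 subtract 1, otherwise add 0; the previous Count is returned
        return __e32_atomic_tas_ord64(&Count, 1, -1, 0) > 0;
        }
*/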