// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\x86\atomic_skeleton.h
//
/**
 Read an 8/16/32 bit quantity with acquire semantics

 @param	a	Address of data to be read - must be naturally aligned
 @return		The value read
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_load_acq)(const volatile TAny* /*a*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
#ifdef __BARRIERS_NEEDED__
	_asm lock add dword ptr [esp], 0
#endif
	_asm ret
	}
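// On IA-32 an aligned load already carries acquire semantics; the
// `lock add dword ptr [esp], 0` above is the classic cheap full fence -
// a locked read-modify-write of the word at the top of the stack. It is
// assembled only when __BARRIERS_NEEDED__ is defined, which presumably
// selects SMP builds; on uniprocessor targets the fence would be wasted work.
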
/** Write an 8/16/32 bit quantity with release semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_store_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __D_REG__, [esp+8]
	_asm mov __A_REG__, __D_REG__
	_asm __LOCK__ xchg [ecx], __D_REG__
	_asm ret
	}
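// `xchg` with a memory operand asserts the LOCK signal implicitly, so the
// store above is in fact a full-barrier store, not merely a release store.
// That is presumably why __e32_atomic_store_ord below can simply jump here
// instead of carrying its own implementation.
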
/** Write an 8/16/32 bit quantity with full barrier semantics

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The value written
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_store_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_store_rel)
	}
/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Relaxed ordering.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}
/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Acquire semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}
/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Release semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_swp_ord)
	}
/** Write an 8/16/32 bit quantity to memory and return the original value of the memory.
	Full barrier semantics.

	@param	a	Address of data to be written - must be naturally aligned
	@param	v	The value to be written
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_swp_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [esp+8]
	_asm __LOCK__ xchg [ecx], __A_REG__
	_asm ret
	}
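// The _rlx/_acq/_rel swap variants above all jump here: every x86 locked
// instruction is already a full barrier, so the weaker orderings cannot be
// implemented any more cheaply and simply alias the _ord version. The same
// pattern repeats for the cas/add/and/ior/xor/axo/tau/tas families below.
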
/** 8/16/32 bit compare and swap, relaxed ordering.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_rlx)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}
/** 8/16/32 bit compare and swap, acquire semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_acq)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}
/** 8/16/32 bit compare and swap, release semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_rel)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_cas_ord)
	}
/** 8/16/32 bit compare and swap, full barrier semantics.

	Atomically performs the following operation:
		if (*a == *q)	{ *a = v; return TRUE; }
		else			{ *q = *a; return FALSE; }

	@param	a	Address of data to be written - must be naturally aligned
	@param	q	Address of location containing expected value
	@param	v	The new value to be written if the old value is as expected
	@return		TRUE if *a was updated, FALSE otherwise
*/
EXPORT_C __NAKED__ TBool __fname__(__e32_atomic_cas_ord)(volatile TAny* /*a*/, __TUintX__* /*q*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov eax, [esp+8]
	_asm mov __D_REG__, [esp+12]
	_asm mov __A_REG__, [eax]
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short cas_fail
	_asm mov eax, 1
	_asm ret
	cas_fail:
	_asm mov edx, [esp+8]
	_asm mov [edx], __A_REG__
	_asm xor eax, eax
	_asm ret
	}
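// How the cmpxchg above works: it compares the accumulator (__A_REG__,
// holding the caller's expected value *q) with [ecx]. If they are equal it
// stores __D_REG__ (the new value) and sets ZF; otherwise it loads the value
// actually observed into the accumulator and clears ZF. So on the cas_fail
// path __A_REG__ already holds the current *a, which is written back
// through q before returning FALSE - no second read of *a is needed.
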
/** 8/16/32 bit atomic add, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}
/** 8/16/32 bit atomic add, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}
/** 8/16/32 bit atomic add, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_add_ord)
	}
/** 8/16/32 bit atomic add, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv + v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be added
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_add_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [esp+8]
	_asm __LOCK__ xadd [ecx], __A_REG__
	_asm ret
	}
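// `lock xadd` performs the whole fetch-and-add in one instruction: it
// exchanges __A_REG__ with [ecx] and stores the sum, so __A_REG__ ends up
// holding the original value of *a - exactly the required return value.
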
/** 8/16/32 bit atomic bitwise logical AND, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}
/** 8/16/32 bit atomic bitwise logical AND, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}
/** 8/16/32 bit atomic bitwise logical AND, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_and_ord)
	}
/** 8/16/32 bit atomic bitwise logical AND, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv & v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ANDed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_and_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	retry:
	_asm mov __D_REG__, [esp+8]
	_asm and __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
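// AND has no single-instruction fetch-and-op form on x86, so it is built
// from an optimistic cmpxchg loop: snapshot *a into __A_REG__, compute the
// desired value in __D_REG__, then publish it only if *a still equals the
// snapshot. On failure cmpxchg has already refreshed __A_REG__ with the
// current value, so the loop retries without an extra load. The ior and
// xor implementations below are identical apart from the ALU instruction.
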
/** 8/16/32 bit atomic bitwise logical inclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}
/** 8/16/32 bit atomic bitwise logical inclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}
/** 8/16/32 bit atomic bitwise logical inclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_ior_ord)
	}
/** 8/16/32 bit atomic bitwise logical inclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv | v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be ORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_ior_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	retry:
	_asm mov __D_REG__, [esp+8]
	_asm or __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
/** 8/16/32 bit atomic bitwise logical exclusive OR, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_rlx)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}
/** 8/16/32 bit atomic bitwise logical exclusive OR, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_acq)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}
/** 8/16/32 bit atomic bitwise logical exclusive OR, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_rel)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_xor_ord)
	}
/** 8/16/32 bit atomic bitwise logical exclusive OR, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = oldv ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	v	The value to be XORed with *a
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_xor_ord)(volatile TAny* /*a*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	retry:
	_asm mov __D_REG__, [esp+8]
	_asm xor __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
/** 8/16/32 bit atomic bitwise universal function, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_rlx)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}
/** 8/16/32 bit atomic bitwise universal function, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_acq)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}
/** 8/16/32 bit atomic bitwise universal function, release semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_rel)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_axo_ord)
	}
/** 8/16/32 bit atomic bitwise universal function, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; *a = (oldv & u) ^ v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	u	The value to be ANDed with *a
	@param	v	The value to be XORed with (*a&u)
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_axo_ord)(volatile TAny* /*a*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	retry:
	_asm mov __D_REG__, [esp+8]
	_asm and __D_REG__, __A_REG__
	_asm xor __D_REG__, [esp+12]
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
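// The and-xor form is universal for bitwise updates: (x & u) ^ v yields
// AND (u=m, v=0), OR (u=~m, v=m) and XOR (u=~0u, v=m), so in principle the
// three preceding families could all be expressed in terms of this one.
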
/** 8/16/32 bit threshold and add, unsigned, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_rlx)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}
/** 8/16/32 bit threshold and add, unsigned, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_acq)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}
/** 8/16/32 bit threshold and add, unsigned, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_rel)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tau_ord)
	}
/** 8/16/32 bit threshold and add, unsigned, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (unsigned compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TUintX__ __fname__(__e32_atomic_tau_ord)(volatile TAny* /*a*/, __TUintX__ /*t*/, __TUintX__ /*u*/, __TUintX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	retry:
	_asm mov __D_REG__, [esp+12]
	_asm cmp __A_REG__, [esp+8]
	_asm jae short use_u
	_asm mov __D_REG__, [esp+16]
	use_u:
	_asm add __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
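// Branch structure: __D_REG__ is preloaded with u, then the unsigned
// compare `jae` skips the reload when oldv >= t; otherwise __D_REG__ is
// overwritten with v before the add. The usual cmpxchg retry loop then
// publishes oldv plus the chosen addend.
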
/** 8/16/32 bit threshold and add, signed, relaxed ordering.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_rlx)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}
/** 8/16/32 bit threshold and add, signed, acquire semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_acq)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}
/** 8/16/32 bit threshold and add, signed, release semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_rel)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm jmp __fname__(__e32_atomic_tas_ord)
	}
/** 8/16/32 bit threshold and add, signed, full barrier semantics.

	Atomically performs the following operation:
		oldv = *a; if (oldv>=t) *a=oldv+u else *a=oldv+v; return oldv;

	@param	a	Address of data to be updated - must be naturally aligned
	@param	t	The threshold to compare *a to (signed compare)
	@param	u	The value to be added to *a if it is originally >= t
	@param	v	The value to be added to *a if it is originally < t
	@return		The original value of *a
*/
EXPORT_C __NAKED__ __TIntX__ __fname__(__e32_atomic_tas_ord)(volatile TAny* /*a*/, __TIntX__ /*t*/, __TIntX__ /*u*/, __TIntX__ /*v*/)
	{
	_asm mov ecx, [esp+4]
	_asm mov __A_REG__, [ecx]
	retry:
	_asm mov __D_REG__, [esp+12]
	_asm cmp __A_REG__, [esp+8]
	_asm jge short use_u
	_asm mov __D_REG__, [esp+16]
	use_u:
	_asm add __D_REG__, __A_REG__
	_asm __LOCK__ cmpxchg [ecx], __D_REG__
	_asm jne short retry
	_asm ret
	}
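// Identical to __e32_atomic_tau_ord except that `jge` replaces `jae`,
// turning the threshold comparison into a signed one.

// A minimal usage sketch, assuming the 32-bit instantiation of this
// skeleton (__TUintX__ = TUint32, with __fname__(f) expanding to f##32 -
// the instantiation details live in the including source file, not here):
//
//	volatile TUint32 count = 0;
//	__e32_atomic_add_ord32(&count, 1);				// count: 0 -> 1, returns 0
//	TUint32 expected = 1;
//	__e32_atomic_cas_ord32(&count, &expected, 5);	// count: 1 -> 5, returns TRUE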