/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company.  All rights reserved.
 * Copyright (c) 2008-2018 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */
#if (AO_GNUC_PREREQ(4, 8) || AO_CLANG_PREREQ(3, 4)) \
    && !defined(__INTEL_COMPILER) /* TODO: test and enable icc */ \
    && !defined(AO_DISABLE_GCC_ATOMICS)
# define AO_GCC_ATOMIC_TEST_AND_SET

# if defined(__APPLE_CC__)
    /* OS X 10.7 clang-425 lacks the __GCC_HAVE_SYNC_COMPARE_AND_SWAP_n */
    /* predefined macro (unlike e.g. OS X 10.11 clang-703).             */
#   define AO_GCC_FORCE_HAVE_CAS

#   ifdef __x86_64__
#     if !AO_CLANG_PREREQ(9, 0) /* < Apple clang-900 */
        /* Older Apple clang (e.g., clang-600 based on LLVM 3.5svn) had */
        /* a bug in the double-word CAS implementation for x64.         */
#       define AO_SKIPATOMIC_double_compare_and_swap_ANY
#     endif

#   elif defined(__MACH__)
      /* OS X 10.8 lacks __atomic_load/store symbols for arch i386      */
      /* (even with a non-Apple clang).                                 */
#     ifndef MAC_OS_X_VERSION_MIN_REQUIRED
        /* Include this header just to import the version macro.        */
#       include <AvailabilityMacros.h>
#     endif
#     if MAC_OS_X_VERSION_MIN_REQUIRED < 1090 /* MAC_OS_X_VERSION_10_9 */
#       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#     endif
#   endif /* __i386__ */
# elif defined(__clang__)
#   if !defined(__x86_64__)
#     if !defined(AO_PREFER_BUILTIN_ATOMICS) && !defined(__CYGWIN__) \
         && !AO_CLANG_PREREQ(5, 0)
        /* At least clang-3.8/i686 (from NDK r11c) requires -latomic    */
        /* if a double-word atomic operation is used.                   */
#       define AO_SKIPATOMIC_double_compare_and_swap_ANY
#       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#     endif /* !AO_PREFER_BUILTIN_ATOMICS */

#   elif !defined(__ILP32__)
#     if (!AO_CLANG_PREREQ(3, 5) && !defined(AO_PREFER_BUILTIN_ATOMICS)) \
         || (!AO_CLANG_PREREQ(4, 0) && defined(AO_ADDRESS_SANITIZER)) \
         || defined(AO_THREAD_SANITIZER)
        /* clang-3.4/x64 required -latomic.  clang-3.9/x64 seems to     */
        /* pass double-wide arguments to atomic operations incorrectly  */
        /* in case of ASan/TSan.                                        */
        /* TODO: As of clang-4.0, lock-free test_stack fails if TSan.   */
#       define AO_SKIPATOMIC_double_compare_and_swap_ANY
#       define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#     endif
#   endif /* __x86_64__ */
# elif AO_GNUC_PREREQ(7, 0) && !defined(AO_PREFER_BUILTIN_ATOMICS) \
       && !defined(AO_THREAD_SANITIZER) && !defined(__MINGW32__)
    /* gcc-7.x/x64 (gcc-7.2, at least) requires the -latomic flag if    */
    /* double-word atomic operations are used (but not under TSan).     */
    /* TODO: Revise this for future gcc-7 releases.                     */
#   define AO_SKIPATOMIC_double_compare_and_swap_ANY
#   define AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif /* __GNUC__ && !__clang__ */

# ifdef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
#   define AO_SKIPATOMIC_double_load
#   define AO_SKIPATOMIC_double_load_acquire
#   define AO_SKIPATOMIC_double_store
#   define AO_SKIPATOMIC_double_store_release
#   undef AO_SKIPATOMIC_DOUBLE_LOAD_STORE_ANY
# endif

#else /* AO_DISABLE_GCC_ATOMICS */
/* The following really assume we have a 486 or better.  Unfortunately */
/* gcc doesn't define a suitable feature test macro based on command   */
/* line options.                                                       */
/* We should perhaps test dynamically.                                 */

#include "../all_aligned_atomic_load_store.h"

#include "../test_and_set_t_is_char.h"

#if defined(__SSE2__) && !defined(AO_USE_PENTIUM4_INSTRS)
  /* "mfence" is part of the SSE2 set (introduced on Intel Pentium 4). */
# define AO_USE_PENTIUM4_INSTRS
#endif

#if defined(AO_USE_PENTIUM4_INSTRS)
  AO_INLINE void
  AO_nop_full(void)
  {
    __asm__ __volatile__("mfence" : : : "memory");
  }
# define AO_HAVE_nop_full

#else
  /* We could use the cpuid instruction.  But that seems to be slower   */
  /* than the default implementation based on test_and_set_full.  Thus  */
  /* we omit that bit of misinformation here.                           */
#endif /* !AO_USE_PENTIUM4_INSTRS */
/* As far as we can tell, the lfence and sfence instructions are not   */
/* currently needed or useful for cached memory accesses.              */

/* Really only works for 486 and later.        */
#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xadd %0, %1"
                        : "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
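
/* Usage sketch (illustrative only, not part of this header's API):    */
/* since the xadd-based primitive returns the value held before the    */
/* addition, a shared counter needs no separate read, e.g.:            */
/*                                                                     */
/*   static volatile AO_t event_count = 0;                             */
/*   ...                                                               */
/*   AO_t prev = AO_fetch_and_add_full(&event_count, 1);               */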
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1"
                        : "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1"
                        : "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; and %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
# define AO_HAVE_and_full

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; or %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
# define AO_HAVE_or_full

AO_INLINE void
AO_xor_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; xor %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
# define AO_HAVE_xor_full
/* AO_store_full could be implemented directly using "xchg", but it    */
/* can also be generalized efficiently as an ordinary store followed   */
/* by AO_nop_full (i.e. an "mfence" instruction).                      */
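/* (A sketch of that generalized form, not necessarily the exact       */
/* expansion produced by the generalization headers:                   */
/*   AO_store(addr, val); AO_nop_full();                               */
/* i.e. the full barrier comes from "mfence" rather than "xchg".)      */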
AO_INLINE void
AO_char_and_full (volatile unsigned char *p, unsigned char value)
{
  __asm__ __volatile__ ("lock; andb %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_char_and_full

AO_INLINE void
AO_char_or_full (volatile unsigned char *p, unsigned char value)
{
  __asm__ __volatile__ ("lock; orb %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_char_or_full

AO_INLINE void
AO_char_xor_full (volatile unsigned char *p, unsigned char value)
{
  __asm__ __volatile__ ("lock; xorb %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_char_xor_full

AO_INLINE void
AO_short_and_full (volatile unsigned short *p, unsigned short value)
{
  __asm__ __volatile__ ("lock; andw %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_short_and_full

AO_INLINE void
AO_short_or_full (volatile unsigned short *p, unsigned short value)
{
  __asm__ __volatile__ ("lock; orw %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_short_or_full

AO_INLINE void
AO_short_xor_full (volatile unsigned short *p, unsigned short value)
{
  __asm__ __volatile__ ("lock; xorw %1, %0"
                        : "=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_short_xor_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
  unsigned char oldval;

  /* Note: the "xchg" instruction does not need a "lock" prefix.       */
  __asm__ __volatile__ ("xchgb %0, %1"
                        : "=q" (oldval), "=m" (*addr)
                        : "0" ((unsigned char)0xff), "m" (*addr)
                        : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
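
/* Usage sketch (illustrative only): test-and-set is the classic       */
/* building block for a spinlock; AO_CLEAR releases it, e.g.:          */
/*                                                                     */
/*   static volatile AO_TS_t lock = AO_TS_INITIALIZER;                 */
/*   ...                                                               */
/*   while (AO_test_and_set_full(&lock) == AO_TS_SET) { }              */
/*   ... critical section ...                                          */
/*   AO_CLEAR(&lock);                                                  */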
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
  /* Returns nonzero if the comparison succeeded.       */
  AO_INLINE int
  AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
  {
#   ifdef AO_USE_SYNC_CAS_BUILTIN
      return (int)__sync_bool_compare_and_swap(addr, old, new_val
                        /* empty protection list */);
                /* Note: an empty list of variables protected by the    */
                /* memory barrier should mean all globally accessible   */
                /* variables are protected.                             */
#   else
      char result;

      __asm__ __volatile__ ("lock; cmpxchg %3, %0; setz %1"
                            : "=m" (*addr), "=a" (result)
                            : "m" (*addr), "r" (new_val), "a" (old)
                            : "memory");
      return (int)result;
#   endif
  }
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
                               AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return __sync_val_compare_and_swap(addr, old_val, new_val
                        /* empty protection list */);
# else
    AO_t fetched_val;

    __asm__ __volatile__ ("lock; cmpxchg %3, %4"
                          : "=a" (fetched_val), "=m" (*addr)
                          : "a" (old_val), "r" (new_val), "m" (*addr)
                          : "memory");
    return fetched_val;
# endif
}
#define AO_HAVE_fetch_compare_and_swap_full
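
/* Usage sketch (illustrative only; addr and new_max are placeholder   */
/* names): the fetch variant supports the usual CAS retry loop with    */
/* no separate reload on failure, e.g. an atomic maximum:              */
/*                                                                     */
/*   AO_t seen = AO_load(addr);                                        */
/*   while (seen < new_max) {                                          */
/*     AO_t prev = AO_fetch_compare_and_swap_full(addr, seen, new_max);*/
/*     if (prev == seen) break;                                        */
/*     seen = prev;                                                    */
/*   }                                                                 */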
AO_INLINE unsigned char
AO_char_fetch_compare_and_swap_full(volatile unsigned char *addr,
                                    unsigned char old_val,
                                    unsigned char new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return __sync_val_compare_and_swap(addr, old_val, new_val
                        /* empty protection list */);
# else
    unsigned char fetched_val;

    __asm__ __volatile__ ("lock; cmpxchgb %3, %4"
                          : "=a" (fetched_val), "=m" (*addr)
                          : "a" (old_val), "q" (new_val), "m" (*addr)
                          : "memory");
    return fetched_val;
# endif
}
# define AO_HAVE_char_fetch_compare_and_swap_full

AO_INLINE unsigned short
AO_short_fetch_compare_and_swap_full(volatile unsigned short *addr,
                                     unsigned short old_val,
                                     unsigned short new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return __sync_val_compare_and_swap(addr, old_val, new_val
                        /* empty protection list */);
# else
    unsigned short fetched_val;

    __asm__ __volatile__ ("lock; cmpxchgw %3, %4"
                          : "=a" (fetched_val), "=m" (*addr)
                          : "a" (old_val), "r" (new_val), "m" (*addr)
                          : "memory");
    return fetched_val;
# endif
}
# define AO_HAVE_short_fetch_compare_and_swap_full
# if defined(__x86_64__) && !defined(__ILP32__)
    AO_INLINE unsigned int
    AO_int_fetch_compare_and_swap_full(volatile unsigned int *addr,
                                       unsigned int old_val,
                                       unsigned int new_val)
    {
#   ifdef AO_USE_SYNC_CAS_BUILTIN
      return __sync_val_compare_and_swap(addr, old_val, new_val
                        /* empty protection list */);
#   else
      unsigned int fetched_val;

      __asm__ __volatile__ ("lock; cmpxchgl %3, %4"
                            : "=a" (fetched_val), "=m" (*addr)
                            : "a" (old_val), "r" (new_val), "m" (*addr)
                            : "memory");
      return fetched_val;
#   endif
    }
# define AO_HAVE_int_fetch_compare_and_swap_full

# ifndef AO_PREFER_GENERALIZED
    AO_INLINE unsigned int
    AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
    {
      unsigned int result;

      __asm__ __volatile__ ("lock; xaddl %0, %1"
                            : "=r" (result), "=m" (*p)
                            : "0" (incr), "m" (*p)
                            : "memory");
      return result;
    }
# define AO_HAVE_int_fetch_and_add_full

    AO_INLINE void
    AO_int_and_full (volatile unsigned int *p, unsigned int value)
    {
      __asm__ __volatile__ ("lock; andl %1, %0"
                            : "=m" (*p) : "r" (value), "m" (*p)
                            : "memory");
    }
# define AO_HAVE_int_and_full

    AO_INLINE void
    AO_int_or_full (volatile unsigned int *p, unsigned int value)
    {
      __asm__ __volatile__ ("lock; orl %1, %0"
                            : "=m" (*p) : "r" (value), "m" (*p)
                            : "memory");
    }
# define AO_HAVE_int_or_full

    AO_INLINE void
    AO_int_xor_full (volatile unsigned int *p, unsigned int value)
    {
      __asm__ __volatile__ ("lock; xorl %1, %0"
                            : "=m" (*p) : "r" (value), "m" (*p)
                            : "memory");
    }
# define AO_HAVE_int_xor_full
# endif /* !AO_PREFER_GENERALIZED */

# else
#   define AO_T_IS_INT
# endif /* !x86_64 || ILP32 */
/* Real X86 implementations, except for some old 32-bit WinChips,      */
/* appear to enforce ordering between memory operations, EXCEPT that   */
/* a later read can pass earlier writes, presumably due to the         */
/* visible presence of store buffers.                                  */
/* We ignore both the WinChips and the fact that the official specs    */
/* seem to be much weaker (and arguably too weak to be usable).        */
#include "../ordered_except_wr.h"

#endif /* AO_DISABLE_GCC_ATOMICS */
#if defined(AO_GCC_ATOMIC_TEST_AND_SET) \
    && !defined(AO_SKIPATOMIC_double_compare_and_swap_ANY)

# if defined(__ILP32__) || !defined(__x86_64__) /* 32-bit AO_t */ \
     || defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) /* 64-bit AO_t */
#   include "../standard_ao_double_t.h"
# endif
#elif !defined(__x86_64__) && (!defined(AO_USE_SYNC_CAS_BUILTIN) \
                               || defined(AO_GCC_ATOMIC_TEST_AND_SET))
# include "../standard_ao_double_t.h"

  /* Reading or writing a quadword aligned on a 64-bit boundary is     */
  /* always carried out atomically on at least a Pentium, according    */
  /* to Chapter 8.1.1 of Volume 3A Part 1 of the Intel processor       */
  /* manuals.                                                          */
# ifndef AO_PREFER_GENERALIZED
#   define AO_ACCESS_double_CHECK_ALIGNED
#   include "../loadstore/double_atomic_load_store.h"
# endif
  /* Returns nonzero if the comparison succeeded.       */
  /* Really requires at least a Pentium.                */
  AO_INLINE int
  AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                         AO_t old_val1, AO_t old_val2,
                                         AO_t new_val1, AO_t new_val2)
  {
    char result;
#   ifdef __PIC__
      AO_t saved_ebx;

      /* If PIC is turned on, we cannot use ebx as it is reserved for   */
      /* the GOT pointer.  We should save and restore ebx.  The         */
      /* proposed solution is not as efficient as the older             */
      /* alternatives using push ebx or edi as new_val1 (w/o clobbering */
      /* edi and temporary local variable usage) but it is more         */
      /* portable (it works even if ebx is not used as GOT pointer, and */
      /* it works for the buggy GCC releases that incorrectly evaluate  */
      /* memory operands offset in the inline assembly after push).     */
#     ifdef __OPTIMIZE__
        __asm__ __volatile__("mov %%ebx, %2\n\t" /* save ebx */
                             "lea %0, %%edi\n\t" /* in case addr is in ebx */
                             "mov %7, %%ebx\n\t" /* load new_val1 */
                             "lock; cmpxchg8b (%%edi)\n\t"
                             "mov %2, %%ebx\n\t" /* restore ebx */
                             "setz %1"
                             : "=m" (*addr), "=a" (result), "=m" (saved_ebx)
                             : "m" (*addr), "d" (old_val2), "a" (old_val1),
                               "c" (new_val2), "m" (new_val1)
                             : "%edi", "memory");
#     else
        /* A less efficient variant that manually preserves edi if GCC  */
        /* is invoked with the -O0 option (otherwise GCC fails to find  */
        /* a register in class 'GENERAL_REGS').                         */
        AO_t saved_edi;

        __asm__ __volatile__("mov %%edi, %3\n\t" /* save edi */
                             "mov %%ebx, %2\n\t" /* save ebx */
                             "lea %0, %%edi\n\t" /* in case addr is in ebx */
                             "mov %8, %%ebx\n\t" /* load new_val1 */
                             "lock; cmpxchg8b (%%edi)\n\t"
                             "mov %2, %%ebx\n\t" /* restore ebx */
                             "mov %3, %%edi\n\t" /* restore edi */
                             "setz %1"
                             : "=m" (*addr), "=a" (result),
                               "=m" (saved_ebx), "=m" (saved_edi)
                             : "m" (*addr), "d" (old_val2), "a" (old_val1),
                               "c" (new_val2), "m" (new_val1) : "memory");
#     endif
#   else
      /* For non-PIC mode, this operation could be simplified (and made */
      /* faster) by using ebx as new_val1 (GCC would refuse to compile  */
      /* such code for PIC mode).                                       */
      __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
                            : "=m" (*addr), "=a" (result)
                            : "m" (*addr), "d" (old_val2), "a" (old_val1),
                              "c" (new_val2), "b" (new_val1)
                            : "memory");
#   endif
    return (int) result;
  }
# define AO_HAVE_compare_double_and_swap_double_full
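
  /* Usage sketch (illustrative only; loc, old_ptr, new_ptr and        */
  /* old_tag are placeholder names): a double-word CAS of this kind    */
  /* is typically used to update a pointer together with a version     */
  /* tag in order to avoid ABA problems, e.g.:                         */
  /*                                                                   */
  /*   if (AO_compare_double_and_swap_double_full(&loc,                */
  /*                                      old_ptr, old_tag,            */
  /*                                      new_ptr, old_tag + 1)) ...   */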
#elif defined(__ILP32__) || !defined(__x86_64__)
# include "../standard_ao_double_t.h"

  /* Reading or writing a quadword aligned on a 64-bit boundary is     */
  /* always carried out atomically (requires at least a Pentium).      */
# ifndef AO_PREFER_GENERALIZED
#   define AO_ACCESS_double_CHECK_ALIGNED
#   include "../loadstore/double_atomic_load_store.h"
# endif

  /* X32 has native support for 64-bit integer operations (AO_double_t */
  /* is a 64-bit integer and we could use 64-bit cmpxchg).              */
  /* This primitive is used by compare_double_and_swap_double_full.     */
  AO_INLINE int
  AO_double_compare_and_swap_full(volatile AO_double_t *addr,
                                  AO_double_t old_val, AO_double_t new_val)
  {
    /* It is safe to use the __sync CAS built-in here.  */
    return __sync_bool_compare_and_swap(&addr->AO_whole,
                                        old_val.AO_whole, new_val.AO_whole
                                        /* empty protection list */);
  }
# define AO_HAVE_double_compare_and_swap_full
#elif defined(AO_CMPXCHG16B_AVAILABLE) \
      || (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) \
          && !defined(AO_THREAD_SANITIZER))
# include "../standard_ao_double_t.h"

  /* The Intel and AMD Architecture Programmer Manuals state roughly   */
  /* the following:                                                    */
  /* - CMPXCHG16B (with a LOCK prefix) can be used to perform 16-byte  */
  /*   atomic accesses in 64-bit mode (with certain alignment          */
  /*   restrictions);                                                  */
  /* - SSE instructions that access data larger than a quadword (like  */
  /*   MOVDQA) may be implemented using multiple memory accesses;      */
  /* - LOCK prefix causes an invalid-opcode exception when used with   */
  /*   128-bit media (SSE) instructions.                               */
  /* Thus, currently, the only way to implement lock-free double_load  */
  /* and double_store on x86_64 is to use CMPXCHG16B (if available).   */

  /* NEC LE-IT: older AMD Opterons are missing this instruction;       */
  /* executing it on such machines raises SIGILL.                      */
  /* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated (lock     */
  /* based) version available.                                         */
  /* HB: Changed this to not define either by default.  There are      */
  /* enough machines and tool chains around on which cmpxchg16b        */
  /* doesn't work.  And the emulation is unsafe by our usual rules.    */
  /* However, both are clearly useful in certain cases.                */

  AO_INLINE int
  AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                         AO_t old_val1, AO_t old_val2,
                                         AO_t new_val1, AO_t new_val2)
  {
    char result;

    __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                         : "=m" (*addr), "=a" (result)
                         : "m" (*addr), "d" (old_val2), "a" (old_val1),
                           "c" (new_val2), "b" (new_val1)
                         : "memory");
    return (int) result;
  }
# define AO_HAVE_compare_double_and_swap_double_full
#elif defined(AO_WEAK_DOUBLE_CAS_EMULATION)
# include "../standard_ao_double_t.h"

# ifdef __cplusplus
    extern "C" {
# endif

  /* This one provides spinlock-based emulation of CAS implemented in  */
  /* atomic_ops.c.  We probably do not want to do this here, since it  */
  /* is not atomic with respect to other kinds of updates of *addr.    */
  /* On the other hand, this may be a useful facility on occasion.     */
  int AO_compare_double_and_swap_double_emulation(
                                        volatile AO_double_t *addr,
                                        AO_t old_val1, AO_t old_val2,
                                        AO_t new_val1, AO_t new_val2);

# ifdef __cplusplus
    } /* extern "C" */
# endif

  AO_INLINE int
  AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                         AO_t old_val1, AO_t old_val2,
                                         AO_t new_val1, AO_t new_val2)
  {
    return AO_compare_double_and_swap_double_emulation(addr,
                                old_val1, old_val2, new_val1, new_val2);
  }
# define AO_HAVE_compare_double_and_swap_double_full
#endif /* x86_64 && !ILP32 && CAS_EMULATION && !AO_CMPXCHG16B_AVAILABLE */
#ifdef AO_GCC_ATOMIC_TEST_AND_SET
# include "generic.h"
#endif /* AO_GCC_ATOMIC_TEST_AND_SET */

#undef AO_GCC_FORCE_HAVE_CAS
#undef AO_SKIPATOMIC_double_compare_and_swap_ANY
#undef AO_SKIPATOMIC_double_load
#undef AO_SKIPATOMIC_double_load_acquire
#undef AO_SKIPATOMIC_double_store
#undef AO_SKIPATOMIC_double_store_release