/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H 1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identically but can be faster when atomicity
     is not really needed since only one thread has access to
     the memory location.  In that case the code is slower in
     the multi-thread case.  The interfaces have the prefix
     "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few lowlevel macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side-effects are evaluated properly, use for
   macro local variables a per-macro unique prefix.  This file uses
   __atgN_ prefix where N is different in each macro.  */

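/* Illustration (the counter name is hypothetical, not part of this
   header): both calls below perform the same read-modify-write, but the
   catomic_ variant is allowed to use a cheaper non-atomic sequence when
   the caller knows no other thread can reach the location.

     static int __hypothetical_counter;
     atomic_increment (&__hypothetical_counter);    always atomic
     catomic_increment (&__hypothetical_counter);   atomic only if needed
*/
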
#include <stdlib.h>

#include <bits/atomic.h>

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof (*mem) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })

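/* Expansion sketch: for a 4-byte object the token pasting selects the
   32-bit arch primitive, so (assuming an "int x")

     __atomic_val_bysize (__arch_compare_and_exchange_val, acq, &x, 1, 0)

   effectively calls __arch_compare_and_exchange_val_32_acq (&x, 1, 0);
   the sizeof chain is constant-folded away at compile time.  */
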
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val,acq, \
                       mem, newval, oldval)
#endif

#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val,acq, \
                       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif

#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif

#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif

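/* Usage sketch: the value-returning CAS supports the classic retry loop;
   "mem" and the doubling update are illustrative only.

     __typeof (*mem) old, seen = *mem;
     do
       {
         old = seen;
         seen = atomic_compare_and_exchange_val_acq (mem, old * 2, old);
       }
     while (seen != old);

   On success the return value equals OLDVAL; otherwise it is the value
   that prevented the exchange, which seeds the next attempt.  */
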
/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif

#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool,acq, \
                        mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
	call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif

#ifndef catomic_compare_and_exchange_bool_rel
# ifndef atomic_compare_and_exchange_bool_rel
#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_bool_rel (mem, newval, oldval)
# endif
#endif

#ifndef atomic_compare_and_exchange_bool_rel
# define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
#endif

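/* Note the inverted sense: zero means the exchange happened.  A
   hypothetical test-and-set lock therefore spins while the macro returns
   non-zero ("lock" is illustrative, not a glibc object):

     while (atomic_compare_and_exchange_bool_acq (&lock, 1, 0))
       ;   spin until we move lock from 0 to 1
*/
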
/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof (*(mem)) __atg5_value = (newvalue); \
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
                                                   __atg5_oldval), 0)); \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif

/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
     atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
                                                   __atg6_oldval \
                                                   + __atg6_value, \
                                                   __atg6_oldval), 0)); \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq (mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq (mem, value)
#endif

#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
            (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
                                                    __atg7_oldv \
                                                    + __atg7_value, \
                                                    __atg7_oldv), 0)); \
     __atg7_oldv; })
#endif

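/* Usage sketch: because the old value is returned, this is a
   fetch-and-add in the classic sense, e.g. handing out unique tickets
   ("next_ticket" is illustrative only):

     unsigned int my_ticket = atomic_exchange_and_add (&next_ticket, 1);
*/
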
#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
	break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg8_memp, __atg8_value, \
                                                    __atg8_oldval), 0)); \
  } while (0)
#endif

#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
	break; \
    } while (__builtin_expect \
             (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
                                                     __atg9_value, \
                                                     __atg9_oldv), 0)); \
  } while (0)
#endif

#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
	break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
                                                    __atg10_value, \
                                                    __atg10_oldval), 0)); \
  } while (0)
#endif

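/* Usage sketch: atomic_max is a monotonic update, handy for keeping a
   high-water mark; the early break avoids a CAS when the stored value is
   already large enough ("peak_usage" is illustrative only):

     atomic_max (&peak_usage, current_usage);
*/
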
#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif

#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif

#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif

#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif

#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif

#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif

/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif

#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif

#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif

#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif

#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif

/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif

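/* Note the asymmetry: the _val forms yield the new value (old plus or
   minus one), while the _and_test forms compare against the old value;
   atomic_decrement_and_test reads as "old == 1", i.e. the counter is
   zero after the decrement.  For example, dropping a last reference
   ("refs" is illustrative only):

     if (atomic_decrement_and_test (&refs))
       destroy ();   refs just reached zero
*/
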
/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
     do \
       { \
	 __atg11_oldval = *__atg11_memp; \
	 if (__glibc_unlikely (__atg11_oldval <= 0)) \
	   break; \
       } \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
                                                   __atg11_oldval - 1, \
                                                   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif

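/* Usage sketch: this is the core of a semaphore trywait; the caller can
   tell from the old value whether the decrement happened ("sem_count" is
   illustrative only):

     if (atomic_decrement_if_positive (&sem_count) > 0)
       ;   acquired
     else
       ;   would block; counter was already 0 or negative
*/
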
#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif

#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif

#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set (mem, bit)
#endif

#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
                                                   __atg14_old | __atg14_mask, \
                                                   __atg14_old), 0)); \
     __atg14_old & __atg14_mask; })
#endif

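/* Usage sketch: the zero/non-zero result tells the caller whether the bit
   was already set, so a one-time claim needs no separate load ("flags"
   and CLAIMED_BIT are illustrative only):

     if (!atomic_bit_test_set (&flags, CLAIMED_BIT))
       ;   we set the bit first and own the resource
*/
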
/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
                                                  __atg15_old & __atg15_mask, \
                                                  __atg15_old), 0)); \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
                                                   __atg20_old & __atg20_mask, \
                                                   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
                                                   __atg16_old & __atg16_mask, \
                                                   __atg16_old), 0)); \
     __atg16_old; })
#endif

/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
                                                  __atg17_old | __atg17_mask, \
                                                  __atg17_old), 0)); \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
                                                   __atg18_old | __atg18_mask, \
                                                   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
                                                   __atg19_old | __atg19_mask, \
                                                   __atg19_old), 0)); \
     __atg19_old; })
#endif

#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm ("" ::: "memory")
#endif

#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif

#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif

#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm ("" : "=r" (__x) : "0" (x)); __x; })
#endif

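/* Note: atomic_forced_read does not emit a barrier; the empty asm only
   forces exactly one load and keeps the compiler from caching or
   re-reading the value.  Typical use is sampling a shared variable once
   ("shared_ptr" is illustrative only):

     struct obj *p = atomic_forced_read (shared_ptr);
     if (p != NULL)
       use (p);   every use sees the same sampled value
*/
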
/* This is equal to 1 iff the architecture supports 64b atomic operations.  */
#ifndef __HAVE_64B_ATOMICS
#error Unable to determine if 64-bit atomics are present.
#endif

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions noted below.  */

/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if USE_ATOMIC_COMPILER_BUILTINS

/* We require 32b atomic operations; some archs also support 64b atomic
   operations.  An unsupported size is diagnosed at link time through a
   call to this undefined function.  */
void __atomic_link_error (void);
# if __HAVE_64B_ATOMICS == 1
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4) \
     __atomic_link_error ();
# endif

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)

/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })

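/* Usage sketch: the intended pairing is a release store of a flag after
   the data write, matched by an acquire load in the consumer; names here
   are illustrative only.

     Producer:
       data = 42;
       atomic_store_release (&ready, 1);

     Consumer:
       if (atomic_load_acquire (&ready))
         assert (data == 42);

   The release/acquire pair makes the data write visible before the flag,
   without needing a seq_cst fence.  */
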
#else /* !USE_ATOMIC_COMPILER_BUILTINS */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof (*(mem)) __atg100_val; \
      __asm ("" : "=r" (__atg100_val) : "0" (*(mem))); \
      __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
      atomic_thread_fence_acquire (); \
      __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ typeof (*(expected)) __atg102_expected = *(expected); \
      *(expected) = \
	atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
      *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a
   weaker CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ typeof (*(expected)) __atg103_expected = *(expected); \
      *(expected) = \
	atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
      *(expected) == __atg103_expected; })
# endif

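/* Usage sketch: as in C11, on failure *EXPECTED is updated to the value
   actually observed, so a retry loop need not reload by hand ("mem" and
   the increment are illustrative only):

     __typeof (*mem) e = atomic_load_relaxed (mem);
     while (!atomic_compare_exchange_weak_acquire (mem, &e, e + 1))
       ;   e now holds the latest value; retry with it
*/
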
# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS */

#ifndef atomic_delay
# define atomic_delay() do { /* nothing */ } while (0)
#endif

#endif /* atomic.h */