/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

#include "../all_aligned_atomic_load_store.h"

/* Real X86 implementations appear to enforce ordering between memory  */
/* operations, EXCEPT that a later read can pass earlier writes,       */
/* presumably due to the visible presence of store buffers.            */
/* We ignore the fact that the official specs seem to be much weaker   */
/* (and arguably too weak to be usable).                               */

#include "../ordered_except_wr.h"

#include "../test_and_set_t_is_char.h"

#include "../standard_ao_double_t.h"

AO_INLINE void
AO_nop_full(void)
{
  /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips.     */
  __asm__ __volatile__("mfence" : : : "memory");
}
#define AO_HAVE_nop_full
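
/* Usage sketch (illustration only): since the only reordering         */
/* tolerated above is a later read passing an earlier write, a         */
/* Dekker-style handshake needs AO_nop_full between the flag store     */
/* and the peer-flag load.  The my_flag/other_flag names below are     */
/* placeholders.                                                       */
#if 0
  AO_store(&my_flag, 1);
  AO_nop_full();   /* keep the load below from passing the store above */
  if (AO_load(&other_flag) == 0) {
    /* ... we won the race; enter the critical region ... */
  }
#endif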

/* As far as we can tell, the lfence and sfence instructions are not   */
/* currently needed or useful for cached memory accesses.              */

AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;

  __asm__ __volatile__ ("lock; xaddq %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_fetch_and_add_full
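
/* Usage sketch (illustration only): the return value is the counter's */
/* value before the addition, so a unique ticket allocator is just a   */
/* fetch-and-add of 1; "ticket_counter" is a placeholder name.         */
#if 0
  static volatile AO_t ticket_counter = 0;

  AO_t take_ticket(void)
  {
    return AO_fetch_and_add_full(&ticket_counter, 1);
  }
#endif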

AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;

  __asm__ __volatile__ ("lock; xaddb %0, %1" :
                        "=q" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;

  __asm__ __volatile__ ("lock; xaddw %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full

AO_INLINE unsigned int
AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
{
  unsigned int result;

  __asm__ __volatile__ ("lock; xaddl %0, %1" :
                        "=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
                        : "memory");
  return result;
}
#define AO_HAVE_int_fetch_and_add_full

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t incr)
{
  __asm__ __volatile__ ("lock; orq %1, %0" :
                        "=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
#define AO_HAVE_or_full
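
/* Usage sketch (illustration only): AO_or_full atomically sets bits   */
/* in place without returning the old value, e.g. to publish a flag    */
/* bit in a shared status word; "status_word" is a placeholder name.   */
#if 0
  static volatile AO_t status_word = 0;
# define DIRTY_BIT ((AO_t)1)

  void mark_dirty(void)
  {
    AO_or_full(&status_word, DIRTY_BIT);
  }
#endif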

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
{
# ifdef AO_XCHGB_RET_WORD
    /* Workaround for a bug in LLVM v2.7 GAS.                          */
    unsigned int oldval;
# else
    unsigned char oldval;
# endif

  /* Note: the "xchg" instruction does not need a "lock" prefix.       */
  __asm__ __volatile__("xchgb %0, %1"
                       : "=q"(oldval), "=m"(*addr)
                       : "0"(0xff), "m"(*addr) : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
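
/* Usage sketch (illustration only): the classic test-and-set spinlock, */
/* assuming the usual AO_TS_INITIALIZER, AO_TS_SET and AO_CLEAR         */
/* definitions from atomic_ops.h.                                       */
#if 0
  static volatile AO_TS_t lock = AO_TS_INITIALIZER;

  void acquire(void)
  {
    while (AO_test_and_set_full(&lock) == AO_TS_SET) {
      /* spin until the previous holder calls AO_CLEAR */
    }
  }

  void release(void)
  {
    AO_CLEAR(&lock);    /* stores AO_TS_CLEAR with release semantics */
  }
#endif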

/* Returns nonzero if the comparison succeeded.                         */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
{
# ifdef AO_USE_SYNC_CAS_BUILTIN
    return (int)__sync_bool_compare_and_swap(addr, old, new_val);
# else
    char result;

    __asm__ __volatile__("lock; cmpxchgq %3, %0; setz %1"
                         : "=m" (*addr), "=a" (result)
                         : "m" (*addr), "r" (new_val), "a" (old) : "memory");
    return (int) result;
# endif
}
#define AO_HAVE_compare_and_swap_full
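
/* Usage sketch (illustration only): a typical retry loop built on     */
/* AO_compare_and_swap_full, here maintaining an atomic maximum.       */
#if 0
  void update_max(volatile AO_t *shared_max, AO_t candidate)
  {
    AO_t cur;

    do {
      cur = AO_load(shared_max);
      if (candidate <= cur) return;       /* nothing to update */
    } while (!AO_compare_and_swap_full(shared_max, cur, candidate));
  }
#endif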

#ifdef AO_CMPXCHG16B_AVAILABLE

/* NEC LE-IT: older AMD Opterons are missing this instruction.
 * On these machines SIGILL will be thrown.
 * Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
 * (lock based) version available. */
/* HB: Changed this to not define either by default.  There are
 * enough machines and tool chains around on which cmpxchg16b
 * doesn't work.  And the emulation is unsafe by our usual rules.
 * However, both are clearly useful in certain cases. */

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  char result;

  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
                       : "=m"(*addr), "=a"(result)
                       : "m"(*addr), "d" (old_val2), "a" (old_val1),
                         "c" (new_val2), "b" (new_val1) : "memory");
  return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
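
/* Usage sketch (illustration only): the usual client of the           */
/* double-width CAS pairs a value with a version counter to avoid ABA  */
/* problems; this assumes the AO_val1/AO_val2 fields declared by       */
/* standard_ao_double_t.h.  A torn snapshot only makes the CAS fail,   */
/* which the retry loop absorbs.                                       */
#if 0
  void set_versioned(volatile AO_double_t *p, AO_t new_val)
  {
    AO_t old_val, old_ver;

    do {
      old_val = p->AO_val1;
      old_ver = p->AO_val2;
    } while (!AO_compare_double_and_swap_double_full(p, old_val, old_ver,
                                                     new_val, old_ver + 1));
  }
#endif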

#else
/* This one provides spinlock based emulation of CAS implemented in    */
/* atomic_ops.c.  We probably do not want to do this here, since it is */
/* not atomic with respect to other kinds of updates of *addr.  On the */
/* other hand, this may be a useful facility on occasion.              */
#ifdef AO_WEAK_DOUBLE_CAS_EMULATION
int AO_compare_double_and_swap_double_emulation(volatile AO_double_t *addr,
                                                AO_t old_val1, AO_t old_val2,
                                                AO_t new_val1, AO_t new_val2);

AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                       AO_t old_val1, AO_t old_val2,
                                       AO_t new_val1, AO_t new_val2)
{
  return AO_compare_double_and_swap_double_emulation(addr, old_val1, old_val2,
                                                     new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */

#endif /* AO_CMPXCHG16B_AVAILABLE */