/*
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
 * Copyright (c) 2009-2016 Ivan Maidanski
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 * Some of the machine specific code was borrowed from our GC distribution.
 */

/* The following really assume we have a 486 or better. */

#include "../all_aligned_atomic_load_store.h"

#include "../test_and_set_t_is_char.h"

#if !defined(AO_USE_PENTIUM4_INSTRS) && !defined(__i386)
  /* "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
# define AO_USE_PENTIUM4_INSTRS
#endif

#if defined(AO_USE_PENTIUM4_INSTRS)
AO_INLINE void
AO_nop_full(void)
{
  /* mfence: full barrier between all earlier and all later memory ops. */
  __asm__ __volatile__ ("mfence" : : : "memory");
}
# define AO_HAVE_nop_full

#else
  /* We could use the cpuid instruction.  But that seems to be slower   */
  /* than the default implementation based on test_and_set_full.  Thus  */
  /* we omit that bit of misinformation here.                            */
#endif /* !AO_USE_PENTIUM4_INSTRS */

/* As far as we can tell, the lfence and sfence instructions are not    */
/* currently needed or useful for cached memory accesses.               */

/* Really only works for 486 and later. */

#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
  AO_t result;
  __asm__ __volatile__ ("lock; xadd %0, %1"
                        : "=r" (result), "+m" (*p)
                        : "0" (incr) : "memory");
  return result;
}
# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */

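/*
 * Usage sketch (illustrative only, not part of this header): the _full
 * fetch-and-add variants return the value the location held *before*
 * the addition, so a shared counter can be advanced and sampled in one
 * atomic step:
 *
 *   volatile AO_t counter = 0;
 *   AO_t ticket = AO_fetch_and_add_full(&counter, 1);
 *
 * Here "ticket" receives the pre-increment value, and the increment is
 * performed with full-barrier semantics.
 */
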
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
{
  unsigned char result;
  __asm__ __volatile__ ("lock; xaddb %0, %1"
                        : "=q" (result), "+m" (*p)
                        : "0" (incr) : "memory");
  return result;
}
#define AO_HAVE_char_fetch_and_add_full

AO_INLINE unsigned short
AO_short_fetch_and_add_full (volatile unsigned short *p, unsigned short incr)
{
  unsigned short result;
  __asm__ __volatile__ ("lock; xaddw %0, %1"
                        : "=r" (result), "+m" (*p)
                        : "0" (incr) : "memory");
  return result;
}
#define AO_HAVE_short_fetch_and_add_full

#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; and %1, %0"
                        : "+m" (*p) : "r" (value) : "memory");
}
# define AO_HAVE_and_full

AO_INLINE void
AO_or_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; or %1, %0"
                        : "+m" (*p) : "r" (value) : "memory");
}
# define AO_HAVE_or_full

AO_INLINE void
AO_xor_full (volatile AO_t *p, AO_t value)
{
  __asm__ __volatile__ ("lock; xor %1, %0"
                        : "+m" (*p) : "r" (value) : "memory");
}
# define AO_HAVE_xor_full
#endif /* !AO_PREFER_GENERALIZED */

AO_INLINE AO_TS_VAL_t
AO_test_and_set_full (volatile AO_TS_t *addr)
{
  unsigned char oldval;
  /* Note: the "xchg" instruction does not need a "lock" prefix. */
  __asm__ __volatile__ ("xchg %b0, %1"
                        : "=q" (oldval), "+m" (*addr)
                        : "0" ((unsigned char)0xff) : "memory");
  return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full

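/*
 * Usage sketch (illustrative only): AO_test_and_set_full is the usual
 * building block for a simple spin lock.  This assumes the generic
 * AO_TS_INITIALIZER, AO_TS_SET and AO_CLEAR definitions from the
 * library's public headers:
 *
 *   static volatile AO_TS_t lock = AO_TS_INITIALIZER;
 *
 *   while (AO_test_and_set_full(&lock) == AO_TS_SET)
 *     ;                          (spin: another thread holds the lock)
 *   ... critical section ...
 *   AO_CLEAR(&lock);             (release)
 */
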
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
  /* Returns nonzero if the comparison succeeded. */
  AO_INLINE int
  AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
  {
    char result;
    __asm__ __volatile__ ("lock; cmpxchg %2, %0; setz %1"
                          : "+m" (*addr), "=a" (result)
                          : "r" (new_val), "a" (old)
                          : "memory");
    return (int)result;
  }
# define AO_HAVE_compare_and_swap_full
#endif /* !AO_GENERALIZE_ASM_BOOL_CAS */

AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val, AO_t new_val)
{
  AO_t fetched_val;
  __asm__ __volatile__ ("lock; cmpxchg %2, %0"
                        : "+m" (*addr), "=a" (fetched_val)
                        : "r" (new_val), "a" (old_val) : "memory");
  return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_full

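/*
 * Usage sketch (illustrative only): because the old value is fetched on
 * failure, a CAS loop needs no separate reload.  For example, an atomic
 * "store maximum" (AO_load comes from the aligned load/store header
 * included above):
 *
 *   AO_t cur = AO_load(p);
 *   while (cur < candidate) {
 *     AO_t seen = AO_fetch_compare_and_swap_full(p, cur, candidate);
 *     if (seen == cur) break;    (exchange succeeded)
 *     cur = seen;                (lost the race; retry with fetched value)
 *   }
 */
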
#if defined(__i386)

# ifndef AO_NO_CMPXCHG8B
#   include "../standard_ao_double_t.h"

    /* Reading or writing a quadword aligned on a 64-bit boundary is    */
    /* always carried out atomically (requires at least a Pentium).     */
#   define AO_ACCESS_double_CHECK_ALIGNED
#   include "../loadstore/double_atomic_load_store.h"

    /* Returns nonzero if the comparison succeeded.                     */
    /* Really requires at least a Pentium.                              */
    AO_INLINE int
    AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
                                           AO_t old_val1, AO_t old_val2,
                                           AO_t new_val1, AO_t new_val2)
    {
      char result;
      __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
                            : "+m" (*addr), "=a" (result)
                            : "d" (old_val2), "a" (old_val1),
                              "c" (new_val2), "b" (new_val1)
                            : "memory");
      return (int)result;
    }
#   define AO_HAVE_compare_double_and_swap_double_full
# endif /* !AO_NO_CMPXCHG8B */

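/*
 * Usage sketch (illustrative only): double-width CAS is typically used
 * for ABA-safe updates that pair a pointer-sized value with a version
 * counter.  This assumes the AO_val1/AO_val2 fields of AO_double_t and
 * the AO_double_load primitive from the headers included above:
 *
 *   AO_double_t cur = AO_double_load(&top);
 *   ... compute new_first from cur.AO_val1 ...
 *   if (AO_compare_double_and_swap_double_full(&top,
 *                                              cur.AO_val1, cur.AO_val2,
 *                                              new_first, cur.AO_val2 + 1))
 *     ... update succeeded without risking an ABA match ...
 */
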
#else /* x86_64 */

  AO_INLINE unsigned int
  AO_int_fetch_and_add_full (volatile unsigned int *p, unsigned int incr)
  {
    unsigned int result;
    __asm__ __volatile__ ("lock; xaddl %0, %1"
                          : "=r" (result), "+m" (*p)
                          : "0" (incr) : "memory");
    return result;
  }
# define AO_HAVE_int_fetch_and_add_full

# ifdef AO_CMPXCHG16B_AVAILABLE
#   include "../standard_ao_double_t.h"

    /* Older AMD Opterons are missing this instruction (SIGILL should   */
    /* be thrown in this case).                                         */
    AO_INLINE int
    AO_compare_double_and_swap_double_full (volatile AO_double_t *addr,
                                            AO_t old_val1, AO_t old_val2,
                                            AO_t new_val1, AO_t new_val2)
    {
      char result;
      __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
                            : "+m" (*addr), "=a" (result)
                            : "d" (old_val2), "a" (old_val1),
                              "c" (new_val2), "b" (new_val1)
                            : "memory");
      return (int)result;
    }
#   define AO_HAVE_compare_double_and_swap_double_full
# endif /* AO_CMPXCHG16B_AVAILABLE */
#endif /* x86_64 */

/* Real X86 implementations, except for some old 32-bit WinChips,       */
/* appear to enforce ordering between memory operations, EXCEPT that    */
/* a later read can pass earlier writes, presumably due to the visible  */
/* presence of store buffers.                                            */
/* We ignore both the WinChips and the fact that the official specs     */
/* seem to be much weaker (and arguably too weak to be usable).          */
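/*
 * A concrete illustration of that exception (a sketch using only the
 * primitives defined or included above): with x and y both initially
 * zero,
 *
 *   thread 1:  AO_store(&x, 1);  r1 = AO_load(&y);
 *   thread 2:  AO_store(&y, 1);  r2 = AO_load(&x);
 *
 * can end with r1 == 0 and r2 == 0, because each thread's load may
 * complete before its own earlier store drains from the store buffer.
 * Inserting AO_nop_full() (mfence) between the store and the load in
 * each thread rules that outcome out.
 */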
#include "../ordered_except_wr.h"