#include "../all_aligned_atomic_load_store.h"
-/* Real X86 implementations, except for some old WinChips, appear */
-/* to enforce ordering between memory operations, EXCEPT that a later */
-/* read can pass earlier writes, presumably due to the visible */
+/* Real X86 implementations, except for some old WinChips, */
+/* appear to enforce ordering between memory operations, EXCEPT that */
+/* a later read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
-/* We ignore both the WinChips, and the fact that the official specs */
+/* We ignore both the WinChips and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "../ordered_except_wr.h"
#include "../standard_ao_double_t.h"
#if defined(AO_USE_PENTIUM4_INSTRS)
-AO_INLINE void
-AO_nop_full(void)
-{
- __asm__ __volatile__("mfence" : : : "memory");
-}
-#define AO_HAVE_nop_full
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ __asm__ __volatile__ ("mfence" : : : "memory");
+ }
+# define AO_HAVE_nop_full
#else
-
-/* We could use the cpuid instruction. But that seems to be slower */
-/* than the default implementation based on test_and_set_full. Thus */
-/* we omit that bit of misinformation here. */
-
-#endif
+ /* We could use the cpuid instruction. But that seems to be slower */
+ /* than the default implementation based on test_and_set_full. Thus */
+ /* we omit that bit of misinformation here. */
+#endif /* !AO_USE_PENTIUM4_INSTRS */
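/* Illustrative usage sketch, not from the original sources: the        */
/* write-read reordering described above is what breaks Dekker-style    */
/* handshakes, and is the case AO_nop_full() exists for.  Client code   */
/* (flag and function names made up) would look roughly like this:      */
#include "atomic_ops.h"

static volatile AO_t flag0 = 0, flag1 = 0;

/* Entry attempt for thread 0; thread 1 is symmetric with the flags     */
/* swapped.  Without the full barrier, the load of flag1 could be       */
/* satisfied before the store to flag0 drains from the store buffer,    */
/* letting both threads proceed at once.                                */
int example_try_enter_thread0(void)
{
  AO_store(&flag0, 1);          /* announce intent                      */
  AO_nop_full();                /* forbid the store-load reordering     */
  if (AO_load(&flag1) != 0) {
    AO_store(&flag0, 0);        /* back off; the caller may retry       */
    return 0;
  }
  return 1;                     /* critical section may be entered      */
}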
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
-AO_INLINE AO_t
-AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
-{
- AO_t result;
+ AO_INLINE AO_t
+ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
+ {
+ AO_t result;
- __asm__ __volatile__ ("lock; xaddl %0, %1" :
+ __asm__ __volatile__ ("lock; xaddl %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr), "m" (*p)
: "memory");
- return result;
-}
-#define AO_HAVE_fetch_and_add_full
+ return result;
+ }
+# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
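/* Illustrative usage sketch, not from the original sources: the usual  */
/* client of the xadd-based primitive above is a shared counter.  The   */
/* counter and helper names are made up.                                */
#include "atomic_ops.h"

static volatile AO_t example_event_count = 0;

/* Atomically bump the counter and return its value before the bump;    */
/* "lock; xadd" makes the read-modify-write a single atomic step.       */
AO_t example_record_event(void)
{
  return AO_fetch_and_add_full(&example_event_count, 1);
}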
AO_INLINE unsigned char
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_PREFER_GENERALIZED
-/* Really only works for 486 and later */
-AO_INLINE void
-AO_and_full (volatile AO_t *p, AO_t value)
-{
- __asm__ __volatile__ ("lock; andl %1, %0" :
- "=m" (*p) : "r" (value), "m" (*p) : "memory");
-}
-#define AO_HAVE_and_full
+ /* Really only works for 486 and later */
+ AO_INLINE void
+ AO_and_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; andl %1, %0" :
+ "=m" (*p) : "r" (value), "m" (*p)
+ : "memory");
+ }
+# define AO_HAVE_and_full
-AO_INLINE void
-AO_or_full (volatile AO_t *p, AO_t value)
-{
- __asm__ __volatile__ ("lock; orl %1, %0" :
- "=m" (*p) : "r" (value), "m" (*p) : "memory");
-}
-#define AO_HAVE_or_full
+ AO_INLINE void
+ AO_or_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; orl %1, %0" :
+ "=m" (*p) : "r" (value), "m" (*p)
+ : "memory");
+ }
+# define AO_HAVE_or_full
-AO_INLINE void
-AO_xor_full (volatile AO_t *p, AO_t value)
-{
- __asm__ __volatile__ ("lock; xorl %1, %0" :
- "=m" (*p) : "r" (value), "m" (*p) : "memory");
-}
-#define AO_HAVE_xor_full
+ AO_INLINE void
+ AO_xor_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; xorl %1, %0" :
+ "=m" (*p) : "r" (value), "m" (*p)
+ : "memory");
+ }
+# define AO_HAVE_xor_full
#endif /* !AO_PREFER_GENERALIZED */
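/* Illustrative usage sketch, not from the original sources: the        */
/* and/or/xor primitives above are typically used to maintain a word of */
/* flag bits without a lock.  The bit layout and names are made up.     */
#include "atomic_ops.h"

#define EXAMPLE_FLAG_DIRTY    ((AO_t)1 << 0)
#define EXAMPLE_FLAG_CLOSING  ((AO_t)1 << 1)

static volatile AO_t example_state_flags = 0;

void example_mark_dirty(void)
{
  AO_or_full(&example_state_flags, EXAMPLE_FLAG_DIRTY);
}

void example_clear_dirty(void)
{
  AO_and_full(&example_state_flags, (AO_t)~EXAMPLE_FLAG_DIRTY);
}

void example_toggle_closing(void)
{
  AO_xor_full(&example_state_flags, EXAMPLE_FLAG_CLOSING);
}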
AO_INLINE AO_TS_VAL_t
{
unsigned char oldval;
/* Note: the "xchg" instruction does not need a "lock" prefix */
- __asm__ __volatile__("xchgb %0, %1"
- : "=q"(oldval), "=m"(*addr)
- : "0"((unsigned char)0xff), "m"(*addr) : "memory");
+ __asm__ __volatile__ ("xchgb %0, %1"
+ : "=q" (oldval), "=m" (*addr)
+ : "0" ((unsigned char)0xff), "m" (*addr)
+ : "memory");
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set_full
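/* Illustrative usage sketch, not from the original sources: the        */
/* xchgb-based test-and-set above is the usual building block for a     */
/* spin lock.  The lock variable and spin policy are made up.           */
#include "atomic_ops.h"

static volatile AO_TS_t example_lock = AO_TS_INITIALIZER;

void example_spin_lock(void)
{
  /* AO_test_and_set_full returns the previous state; AO_TS_CLEAR       */
  /* means the lock was free and now belongs to us.                     */
  while (AO_test_and_set_full(&example_lock) != AO_TS_CLEAR) {
    /* spin; real code would pause, back off or yield here */
  }
}

void example_spin_unlock(void)
{
  AO_CLEAR(&example_lock);      /* release: store the clear state       */
}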
/* variables are protected. */
# else
char result;
- __asm__ __volatile__("lock; cmpxchgl %3, %0; setz %1"
- : "=m" (*addr), "=a" (result)
- : "m" (*addr), "r" (new_val), "a" (old)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgl %3, %0; setz %1"
+ : "=m" (*addr), "=a" (result)
+ : "m" (*addr), "r" (new_val), "a" (old)
+ : "memory");
return (int)result;
# endif
}
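/* Illustrative usage sketch, not from the original sources: the        */
/* boolean compare-and-swap above supports simple retry loops, e.g.     */
/* maintaining a running maximum.  The names below are made up.         */
#include "atomic_ops.h"

static volatile AO_t example_high_water = 0;

void example_note_high_water(AO_t sample)
{
  AO_t cur = AO_load(&example_high_water);
  /* Retry until our value is installed or a larger one is present.     */
  while (sample > cur
         && !AO_compare_and_swap_full(&example_high_water, cur, sample)) {
    cur = AO_load(&example_high_water);
  }
}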
/* empty protection list */);
# else
AO_t fetched_val;
- __asm__ __volatile__("lock; cmpxchgl %3, %4"
- : "=a" (fetched_val), "=m" (*addr)
- : "0" (old_val), "q" (new_val), "m" (*addr)
- : "memory");
+ __asm__ __volatile__ ("lock; cmpxchgl %3, %4"
+ : "=a" (fetched_val), "=m" (*addr)
+ : "0" (old_val), "q" (new_val), "m" (*addr)
+ : "memory");
return fetched_val;
# endif
}
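/* Illustrative usage sketch, not from the original sources: the        */
/* value-returning compare-and-swap above feeds its result straight     */
/* into the next retry, avoiding an extra load.  The helper is made up. */
#include "atomic_ops.h"

/* Atomically multiply *p by factor and return the value it replaced.   */
AO_t example_atomic_scale(volatile AO_t *p, AO_t factor)
{
  AO_t old = AO_load(p);
  for (;;) {
    AO_t prev = AO_fetch_compare_and_swap_full(p, old, old * factor);
    if (prev == old)
      return old;               /* our update was installed             */
    old = prev;                 /* lost a race; retry with what we saw  */
  }
}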
/* For non-PIC mode, this operation could be simplified (and be */
/* faster) by using ebx as new_val1 (GCC would refuse to compile */
/* such code for PIC mode). */
- __asm__ __volatile__("lock; cmpxchg8b %0; setz %1"
- : "=m" (*addr), "=a" (result)
- : "m" (*addr), "d" (old_val2), "a" (old_val1),
- "c" (new_val2), "b" (new_val1) : "memory");
+ __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
+ : "=m" (*addr), "=a" (result)
+ : "m" (*addr), "d" (old_val2), "a" (old_val1),
+ "c" (new_val2), "b" (new_val1)
+ : "memory");
# endif
return (int) result;
}
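/* Illustrative usage sketch, not from the original sources: the        */
/* cmpxchg8b sequence above presumably implements the double-width CAS  */
/* (the function name lies outside this hunk).  Its classic use is a    */
/* pointer/version pair that defeats ABA; all names below are made up.  */
#include "atomic_ops.h"

/* AO_val1 holds the payload, AO_val2 a version counter bumped on every */
/* successful update; both halves are swapped by one instruction.       */
static volatile AO_double_t example_slot;

int example_publish(AO_t new_payload)
{
  AO_t old_payload = example_slot.AO_val1;
  AO_t old_version = example_slot.AO_val2;
  return AO_compare_double_and_swap_double_full(&example_slot,
                                                old_payload, old_version,
                                                new_payload,
                                                old_version + 1);
}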
#include "../all_aligned_atomic_load_store.h"
-/* Real X86 implementations, except for some old WinChips, appear */
-/* to enforce ordering between memory operations, EXCEPT that a later */
-/* read can pass earlier writes, presumably due to the visible */
+/* Real X86 implementations, except for some old WinChips, */
+/* appear to enforce ordering between memory operations, EXCEPT that */
+/* a later read can pass earlier writes, presumably due to the visible */
/* presence of store buffers. */
-/* We ignore both the WinChips, and the fact that the official specs */
+/* We ignore both the WinChips and the fact that the official specs */
/* seem to be much weaker (and arguably too weak to be usable). */
#include "../ordered_except_wr.h"
#include "../standard_ao_double_t.h"
#if defined(AO_USE_PENTIUM4_INSTRS)
-AO_INLINE void
-AO_nop_full(void)
-{
- __asm__ __volatile__ ("mfence" : : : "memory");
-}
-#define AO_HAVE_nop_full
+ AO_INLINE void
+ AO_nop_full(void)
+ {
+ __asm__ __volatile__ ("mfence" : : : "memory");
+ }
+# define AO_HAVE_nop_full
#else
-
-/* We could use the cpuid instruction. But that seems to be slower */
-/* than the default implementation based on test_and_set_full. Thus */
-/* we omit that bit of misinformation here. */
-
-#endif
+ /* We could use the cpuid instruction. But that seems to be slower */
+ /* than the default implementation based on test_and_set_full. Thus */
+ /* we omit that bit of misinformation here. */
+#endif /* !AO_USE_PENTIUM4_INSTRS */
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
#ifndef AO_PREFER_GENERALIZED
-AO_INLINE AO_t
-AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
-{
- AO_t result;
+ AO_INLINE AO_t
+ AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
+ {
+ AO_t result;
- __asm__ __volatile__ ("lock; xaddl %0, %1" :
+ __asm__ __volatile__ ("lock; xaddl %0, %1" :
"=r" (result), "=m" (*p) : "0" (incr) /* , "m" (*p) */
: "memory");
- return result;
-}
-#define AO_HAVE_fetch_and_add_full
+ return result;
+ }
+# define AO_HAVE_fetch_and_add_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
#define AO_HAVE_short_fetch_and_add_full
#ifndef AO_PREFER_GENERALIZED
-/* Really only works for 486 and later */
-AO_INLINE void
-AO_and_full (volatile AO_t *p, AO_t value)
-{
- __asm__ __volatile__ ("lock; andl %1, %0" :
+ /* Really only works for 486 and later */
+ AO_INLINE void
+ AO_and_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; andl %1, %0" :
"=m" (*p) : "r" (value) /* , "m" (*p) */
: "memory");
-}
-#define AO_HAVE_and_full
+ }
+# define AO_HAVE_and_full
-AO_INLINE void
-AO_or_full (volatile AO_t *p, AO_t value)
-{
- __asm__ __volatile__ ("lock; orl %1, %0" :
+ AO_INLINE void
+ AO_or_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; orl %1, %0" :
"=m" (*p) : "r" (value) /* , "m" (*p) */
: "memory");
-}
-#define AO_HAVE_or_full
+ }
+# define AO_HAVE_or_full
-AO_INLINE void
-AO_xor_full (volatile AO_t *p, AO_t value)
-{
- __asm__ __volatile__ ("lock; xorl %1, %0" :
+ AO_INLINE void
+ AO_xor_full (volatile AO_t *p, AO_t value)
+ {
+ __asm__ __volatile__ ("lock; xorl %1, %0" :
"=m" (*p) : "r" (value) /* , "m" (*p) */
: "memory");
-}
-#define AO_HAVE_xor_full
+ }
+# define AO_HAVE_xor_full
#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
{
char result;
__asm__ __volatile__ ("lock; cmpxchgl %2, %0; setz %1"
- : "=m"(*addr), "=a"(result)
- : "r" (new_val), "a"(old)
- : "memory");
+                          : "=m" (*addr), "=a" (result)
+                          : "r" (new_val), "a" (old)
+ : "memory");
return (int) result;
}
# define AO_HAVE_compare_and_swap_full
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
AO_t new_val)
{
- AO_t fetched_val;
- __asm__ __volatile__("lock; cmpxchgl %1, %2"
- : "=a" (fetched_val)
- : "r" (new_val), "m" (*addr), "0" (old_val)
- : "memory");
- return fetched_val;
+ AO_t fetched_val;
+ __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
+ : "=a" (fetched_val)
+ : "r" (new_val), "m" (*addr), "0" (old_val)
+ : "memory");
+ return fetched_val;
}
#define AO_HAVE_fetch_compare_and_swap_full
{
char result;
- __asm__ __volatile__("lock; cmpxchg8b %0; setz %1"
+ __asm__ __volatile__ ("lock; cmpxchg8b %0; setz %1"
: "=m" (*addr), "=a" (result)
: /* "m" (*addr), */ "d" (old_val2), "a" (old_val1),
"c" (new_val2), "b" (new_val1)