# These macros are tested in public headers
AH_TEMPLATE([AO_GENERALIZE_ASM_BOOL_CAS],
[Force compare_and_swap definition via fetch_compare_and_swap])
+AH_TEMPLATE([AO_PREFER_GENERALIZED],
+ [Prefer generalized definitions to direct assembly-based ones])
AH_TEMPLATE([AO_USE_PTHREAD_DEFS],
[Emulate atomic operations via slow and async-signal-unsafe \
pthread locking])
more flexible, other instructions can be done between the LDREX and STREX accesses.
"
*/
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
return result;
}
#define AO_HAVE_fetch_and_sub1
+#endif /* !AO_PREFER_GENERALIZED */
/* NEC LE-IT: compare and swap */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
interrupt latencies. LDREX, STREX are more flexible, other instructions
can be done between the LDREX and STREX accesses."
*/
+#ifndef AO_PREFER_GENERALIZED
#if !defined(AO_FORCE_USE_SWP) || defined(__thumb2__)
/* But, on the other hand, there could be a considerable performance */
/* degradation in case of a race. Eg., test_atomic.c executing */
return result;
}
#define AO_HAVE_fetch_and_sub1
+#endif /* !AO_PREFER_GENERALIZED */
/* NEC LE-IT: compare and swap */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
#endif /* __ARM_ARCH_x */
#if !defined(AO_HAVE_test_and_set_full) && !defined(AO_HAVE_test_and_set) \
+ && (!defined(AO_PREFER_GENERALIZED) \
+ || !defined(AO_HAVE_fetch_compare_and_swap)) \
&& !defined(__ARM_ARCH_2__) && !defined(__ARM_ARCH_6M__)
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
#include "../test_and_set_t_is_ao_t.h"
-AO_INLINE AO_TS_VAL_t
-AO_test_and_set_full(volatile AO_TS_t *addr)
-{
+#ifndef AO_PREFER_GENERALIZED
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
register long ret;
__asm__ __volatile__(
: "memory");
return (AO_TS_VAL_t)ret;
-}
-#define AO_HAVE_test_and_set_full
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE int
AO_compare_and_swap_full(volatile AO_t *addr, AO_t old, AO_t new_val)
/* The Hexagon has load-locked, store-conditional primitives, and so */
/* resulting code is very nearly identical to that of PowerPC. */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr)
{
return (AO_TS_VAL_t)oldval;
}
#define AO_HAVE_test_and_set
+#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
AO_INLINE int
}
#define AO_HAVE_nop_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *addr)
{
return result;
}
#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old, AO_t new_val)
/* FIXME: Add compare_double_and_swap_double for the _ILP32 case. */
#else
+# ifndef AO_PREFER_GENERALIZED
AO_INLINE unsigned int
AO_int_fetch_and_add1_acquire(volatile unsigned int *addr)
{
return result;
}
# define AO_HAVE_int_fetch_and_sub1_release
+# endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned int
AO_int_fetch_compare_and_swap_acquire(volatile unsigned int *addr,
}
#define AO_HAVE_store_release
+#ifndef AO_PREFER_GENERALIZED
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
return result;
}
#define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
#ifndef AO_GENERALIZE_ASM_BOOL_CAS
}
#define AO_HAVE_fetch_compare_and_swap_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* Empty */
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
}
#define AO_HAVE_short_fetch_and_add_full
+#ifndef AO_PREFER_GENERALIZED
/* Really only works for 486 and later */
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
"=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
}
#define AO_HAVE_int_fetch_and_add_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
"=m" (*p) : "r" (value), "m" (*p) : "memory");
}
#define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr)
}
#define AO_HAVE_nop_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire (volatile AO_t *p)
{
_LDHINT_NONE, _UP_MEM_FENCE);
}
#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
}
#define AO_HAVE_store_release
+#ifndef AO_PREFER_GENERALIZED
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
return result;
}
#define AO_HAVE_test_and_set_full
+#endif /* !AO_PREFER_GENERALIZED */
/*AO_INLINE AO_t
AO_fetch_compare_and_swap(volatile AO_t *addr, AO_t old_val, AO_t new_val)
}
#define AO_HAVE_nop_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add1_acquire(volatile AO_t *p)
{
return __fetchadd8_rel((unsigned __int64 *)p, -1);
}
#define AO_HAVE_fetch_and_sub1_release
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_acquire(volatile AO_t *addr, AO_t old_val,
# endif
# endif /* _MSC_VER < 1400 */
-# pragma intrinsic (_InterlockedIncrement)
-# pragma intrinsic (_InterlockedDecrement)
-# pragma intrinsic (_InterlockedExchangeAdd)
+# if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98)
+# pragma intrinsic (_InterlockedIncrement)
+# pragma intrinsic (_InterlockedDecrement)
+# pragma intrinsic (_InterlockedExchangeAdd)
+#   endif /* !AO_PREFER_GENERALIZED || !AO_ASSUME_WINDOWS98 */
# pragma intrinsic (_InterlockedCompareExchange)
# define AO_INTERLOCKED_VOLATILE volatile
#endif /* _MSC_VER >= 1310 */
+#if !defined(AO_PREFER_GENERALIZED) || !defined(AO_ASSUME_WINDOWS98)
AO_INLINE AO_t
AO_fetch_and_add_full(volatile AO_t *p, AO_t incr)
{
return _InterlockedDecrement((LONG AO_INTERLOCKED_VOLATILE *)p) + 1;
}
#define AO_HAVE_fetch_and_sub1_full
+#endif /* !AO_PREFER_GENERALIZED || !AO_ASSUME_WINDOWS98 */
#ifdef AO_ASSUME_WINDOWS98
AO_INLINE AO_t
/* Assume _MSC_VER >= 1400 */
#include <intrin.h>
-#pragma intrinsic (_InterlockedIncrement64)
-#pragma intrinsic (_InterlockedDecrement64)
-#pragma intrinsic (_InterlockedExchangeAdd64)
#pragma intrinsic (_InterlockedCompareExchange64)
+#ifndef AO_PREFER_GENERALIZED
+
+# pragma intrinsic (_InterlockedIncrement64)
+# pragma intrinsic (_InterlockedDecrement64)
+# pragma intrinsic (_InterlockedExchangeAdd64)
+
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return _InterlockedDecrement64((LONGLONG volatile *)p) + 1;
}
#define AO_HAVE_fetch_and_sub1_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_t
AO_fetch_compare_and_swap_full(volatile AO_t *addr, AO_t old_val,
/* currently needed or useful for cached memory accesses. */
/* Really only works for 486 and later */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
}
#define AO_HAVE_short_fetch_and_add_full
+#ifndef AO_PREFER_GENERALIZED
/* Really only works for 486 and later */
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
: "memory");
}
#define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full (volatile AO_TS_t *addr)
/* As far as we can tell, the lfence and sfence instructions are not */
/* currently needed or useful for cached memory accesses. */
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE AO_t
AO_fetch_and_add_full (volatile AO_t *p, AO_t incr)
{
return result;
}
#define AO_HAVE_fetch_and_add_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE unsigned char
AO_char_fetch_and_add_full (volatile unsigned char *p, unsigned char incr)
}
#define AO_HAVE_int_fetch_and_add_full
+#ifndef AO_PREFER_GENERALIZED
AO_INLINE void
AO_and_full (volatile AO_t *p, AO_t value)
{
: "memory");
}
#define AO_HAVE_xor_full
+#endif /* !AO_PREFER_GENERALIZED */
AO_INLINE AO_TS_VAL_t
AO_test_and_set_full (volatile AO_TS_t *addr)