+2011-06-03 Ivan Maidanski <ivmai@mail.ru>
+
+ * src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Remove
+ blank line between AO_func and AO_HAVE_func definitions.
+ * src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/generic_pthread.h: Ditto.
+ * src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/ordered.h: Ditto.
+ * src/atomic_ops/sysdeps/ordered_except_wr.h: Ditto.
+ * src/atomic_ops/sysdeps/read_ordered.h: Ditto.
+ * src/atomic_ops/sysdeps/short_aligned_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/short_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/alpha.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/cris.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/hppa.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/ia64.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/m68k.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/mips.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/powerpc.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/s390.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/sparc.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/x86.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/x86_64.h: Ditto.
+ * src/atomic_ops/sysdeps/hpc/hppa.h: Ditto.
+ * src/atomic_ops/sysdeps/hpc/ia64.h: Ditto.
+ * src/atomic_ops/sysdeps/ibmc/powerpc.h: Ditto.
+ * src/atomic_ops/sysdeps/msftc/common32_defs.h: Ditto.
+ * src/atomic_ops/sysdeps/msftc/x86.h: Ditto.
+ * src/atomic_ops/sysdeps/msftc/x86_64.h: Ditto.
+ * src/atomic_ops/sysdeps/sunc/sparc.h: Ditto.
+ * src/atomic_ops/sysdeps/sunc/x86.h: Ditto.
+ * src/atomic_ops/sysdeps/sunc/x86_64.h: Ditto.
+ * src/atomic_ops/sysdeps/aligned_atomic_load_store.h: Reformat
+ comment.
+ * src/atomic_ops/sysdeps/atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/char_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/int_aligned_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/int_atomic_load_store.h: Ditto.
+ * src/atomic_ops/sysdeps/ordered.h: Ditto.
+ * src/atomic_ops/sysdeps/gcc/arm.h: Ditto.
+ * src/atomic_ops/sysdeps/test_and_set_t_is_char.h: Remove file
+ tail blank lines.
+ * src/atomic_ops/sysdeps/gcc/arm.h (AO_test_and_set_full): Don't
+ define for ARMv2.
+ * src/atomic_ops/sysdeps/gcc/powerpc.h (AO_load_acquire,
+ AO_test_and_set, AO_compare_and_swap): Merge adjacent definitions.
+ * src/atomic_ops/sysdeps/ibmc/powerpc.h (AO_HAVE_store_release):
+ Define.
+ * src/atomic_ops/sysdeps/sunc/sparc.h: Expand all tabs to spaces;
+ remove trailing spaces at EOLn.
+
2011-06-02 Ivan Maidanski <ivmai@mail.ru>
* tests/test_malloc.c (main): Remove unused "exper_n" local
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of AO_t are
- * atomic fo all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of AO_t are */
+/* atomic for all legal alignments.                                     */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
/* volatile adds barrier semantics. */
return *(AO_t *)addr;
}
-
#define AO_HAVE_load
AO_INLINE void
assert(((size_t)addr & (sizeof(AO_t) - 1)) == 0);
(*(AO_t *)addr) = new_val;
}
-
#define AO_HAVE_store
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of AO_t are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of AO_t are */
+/* atomic for all legal alignments. */
AO_INLINE AO_t
AO_load(const volatile AO_t *addr)
/* volatile adds barrier semantics. */
return (*(const AO_t *)addr);
}
-
#define AO_HAVE_load
AO_INLINE void
{
(*(AO_t *)addr) = new_val;
}
-
#define AO_HAVE_store
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned char are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of unsigned */
+/* char are atomic for all legal alignments. */
AO_INLINE unsigned char
AO_char_load(const volatile unsigned char *addr)
/* volatile adds barrier semantics. */
return (*(const unsigned char *)addr);
}
-
#define AO_HAVE_char_load
AO_INLINE void
{
(*(unsigned char *)addr) = new_val;
}
-
#define AO_HAVE_char_store
{
__asm__ __volatile__("mb" : : : "memory");
}
-
#define AO_HAVE_nop_full
AO_INLINE void
{
__asm__ __volatile__("wmb" : : : "memory");
}
-
#define AO_HAVE_nop_write
/* mb should be used for AO_nop_read(). That's the default. */
:"memory");
return was_equal;
}
-
#define AO_HAVE_compare_and_swap
#include "../test_and_set_t_is_ao_t.h" /* Probably suboptimal */
-/* NEC LE-IT: ARMv6 is the first architecture providing support for simple LL/SC
- * A data memory barrier must be raised via CP15 command (see documentation).
- *
- * ARMv7 is compatible to ARMv6 but has a simpler command for issuing a
- * memory barrier (DMB). Raising it via CP15 should still work as told me by the
- * support engineers. If it turns out to be much quicker than we should implement
- * custom code for ARMv7 using the asm { dmb } command.
- *
- * If only a single processor is used, we can define AO_UNIPROCESSOR
- * and do not need to access CP15 for ensuring a DMB
-*/
+/* NEC LE-IT: ARMv6 is the first architecture providing support for */
+/* simple LL/SC. A data memory barrier must be raised via CP15 command */
+/* (see documentation). */
+/* ARMv7 is compatible to ARMv6 but has a simpler command for issuing */
+/* a memory barrier (DMB). Raising it via CP15 should still work as */
+/* told me by the support engineers. If it turns out to be much quicker */
+/* then we should implement custom code for ARMv7 using the asm { dmb } */
+/* instruction. */
+/* If only a single processor is used, we can define AO_UNIPROCESSOR */
+/* and do not need to access CP15 for ensuring a DMB. */
/* NEC LE-IT: gcc has no way to easily check the arm architecture */
/* but it defines only one of __ARM_ARCH_x__ to be true. */
: "=&r"(dest) : : "memory");
#endif
}
-
#define AO_HAVE_nop_full
/* NEC LE-IT: AO_t load is simple reading */
#define AO_HAVE_store
/* NEC LE-IT: replace the SWAP as recommended by ARM:
-
"Applies to: ARM11 Cores
- Though the SWP instruction will still work with ARM V6 cores, it is
- recommended to use the new V6 synchronization instructions. The SWP
- instruction produces 'locked' read and write accesses which are atomic,
- i.e. another operation cannot be done between these locked accesses which
- ties up external bus (AHB,AXI) bandwidth and can increase worst case
- interrupt latencies. LDREX,STREX are more flexible, other instructions can
- be done between the LDREX and STREX accesses.
- "
+ Though the SWP instruction will still work with ARM V6 cores, it is
+ recommended to use the new V6 synchronization instructions. The SWP
+ instruction produces 'locked' read and write accesses which are atomic,
+ i.e. another operation cannot be done between these locked accesses which
+ ties up external bus (AHB,AXI) bandwidth and can increase worst case
+ interrupt latencies. LDREX,STREX are more flexible, other instructions
+ can be done between the LDREX and STREX accesses."
*/
AO_INLINE AO_TS_t
AO_test_and_set(volatile AO_TS_t *addr)
return oldval;
}
-
#define AO_HAVE_test_and_set
/* NEC LE-IT: fetch and add for ARMv6 */
return result;
}
-
#define AO_HAVE_fetch_and_add
/* NEC LE-IT: fetch and add1 for ARMv6 */
return result;
}
-
#define AO_HAVE_fetch_and_add1
/* NEC LE-IT: fetch and sub for ARMv6 */
return result;
}
-
#define AO_HAVE_fetch_and_sub1
/* NEC LE-IT: compare and swap */
/* It appears that SWP is the only simple memory barrier. */
#include "../all_atomic_load_store.h"
-AO_INLINE AO_TS_VAL_t
-AO_test_and_set_full(volatile AO_TS_t *addr)
-{
- AO_TS_VAL_t oldval;
- /* SWP on ARM is very similar to XCHG on x86. */
- /* The first operand is the result, the second the value */
- /* to be stored. Both registers must be different from addr. */
- /* Make the address operand an early clobber output so it */
- /* doesn't overlap with the other operands. The early clobber*/
- /* on oldval is necessary to prevent the compiler allocating */
- /* them to the same register if they are both unused. */
- __asm__ __volatile__("swp %0, %2, [%3]"
- : "=&r"(oldval), "=&r"(addr)
- : "r"(1), "1"(addr)
- : "memory");
- return oldval;
-}
-#define AO_HAVE_test_and_set_full
+#if !defined(__ARM_ARCH_2__)
+ AO_INLINE AO_TS_VAL_t
+ AO_test_and_set_full(volatile AO_TS_t *addr)
+ {
+ AO_TS_VAL_t oldval;
+ /* SWP on ARM is very similar to XCHG on x86. */
+ /* The first operand is the result, the second the value */
+ /* to be stored. Both registers must be different from addr. */
+ /* Make the address operand an early clobber output so it */
+ /* doesn't overlap with the other operands. The early clobber */
+ /* on oldval is necessary to prevent the compiler allocating */
+ /* them to the same register if they are both unused. */
+ __asm__ __volatile__("swp %0, %2, [%3]"
+ : "=&r"(oldval), "=&r"(addr)
+ : "r"(1), "1"(addr)
+ : "memory");
+ return oldval;
+ }
+# define AO_HAVE_test_and_set_full
+#endif /* !__ARM_ARCH_2__ */
#endif /* __ARM_ARCH_x */
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * SOFTWARE.
*
* Most of this code originally comes from Hans-Peter Nilsson. It is included
* here with his permission.
*
* This version has not been tested. It was coped here from a GC
* patch so that we wouldn't lose the code in the upgrade to gc7.
- */
+ */
#include "../all_atomic_load_store.h"
/* Note the use of a dummy output of *addr to expose the write. The
memory barrier is to stop *other* writes being moved past this code. */
__asm__ __volatile__("clearf\n"
- "0:\n\t"
- "movu.b [%2],%0\n\t"
- "ax\n\t"
- "move.b %3,[%2]\n\t"
- "bwf 0b\n\t"
- "clearf"
- : "=&r" (ret), "=m" (*addr)
- : "r" (addr), "r" ((int) 1), "m" (*addr)
- : "memory");
+ "0:\n\t"
+ "movu.b [%2],%0\n\t"
+ "ax\n\t"
+ "move.b %3,[%2]\n\t"
+ "bwf 0b\n\t"
+ "clearf"
+ : "=&r" (ret), "=m" (*addr)
+ : "r" (addr), "r" ((int) 1), "m" (*addr)
+ : "memory");
return ret;
}
-
#define AO_HAVE_test_and_set_full
-
volatile unsigned int *a = __ldcw_align (addr);
return (AO_TS_VAL_t) __ldcw (a);
}
+#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
*a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
-
-#define AO_HAVE_test_and_set_full
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
"=r" (result) AO_OUT_ADDR: AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_fetch_and_sub1_release
#ifndef _ILP32
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_add1_release
AO_INLINE unsigned int
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_sub1_acquire
AO_INLINE unsigned int
"=r" (result): AO_IN_ADDR :"memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_sub1_release
#endif /* !_ILP32 */
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
: "memory");
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_release
#ifndef _ILP32
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory");
return (oldval == old);
}
-
#define AO_HAVE_int_compare_and_swap_acquire
AO_INLINE int
: AO_IN_ADDR, "r"(new_val), "r"((AO_t)old) : "memory");
return (oldval == old);
}
-
#define AO_HAVE_int_compare_and_swap_release
#endif /* !_ILP32 */
: "memory");
return oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
: "memory");
return -result;
}
-
#define AO_HAVE_compare_and_swap_full
-
#include "../ao_t_is_int.h"
" .set pop "
: : : "memory");
}
-
#define AO_HAVE_nop_full
AO_INLINE int
: "memory");
return was_equal;
}
-
#define AO_HAVE_compare_and_swap
/* FIXME: I think the implementations below should be automatically */
AO_nop_full();
return result;
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_nop_full();
return AO_compare_and_swap(addr, old, new_val);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_nop_full();
return result;
}
-
#define AO_HAVE_compare_and_swap_full
/*
{
__asm__ __volatile__("sync" : : : "memory");
}
-
#define AO_HAVE_nop_full
/* lwsync apparently works for everything but a StoreLoad barrier. */
/* cheaper. And the documentation is fairly explicit that this also */
/* has acquire semantics. */
/* ppc64 uses ld not lwz */
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
AO_INLINE AO_t
AO_load_acquire(const volatile AO_t *addr)
{
AO_t result;
-
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
__asm__ __volatile__ (
"ld%U1%X1 %0,%1\n"
"cmpw %0,%0\n"
"1: isync\n"
: "=r" (result)
: "m"(*addr) : "memory", "cr0");
- return result;
-}
#else
-AO_INLINE AO_t
-AO_load_acquire(const volatile AO_t *addr)
-{
- AO_t result;
-
/* FIXME: We should get gcc to allocate one of the condition */
/* registers. I always got "impossible constraint" when I */
/* tried the "y" constraint. */
"1: isync\n"
: "=r" (result)
: "m"(*addr) : "memory", "cc");
+#endif
return result;
}
-#endif
#define AO_HAVE_load_acquire
/* We explicitly specify store_release, since it relies */
AO_lwsync();
*addr = value;
}
-
#define AO_HAVE_load_acquire
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
/* only cost us a load immediate instruction. */
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
-/* Completely untested. And we should be using smaller objects anyway. */
AO_INLINE AO_TS_VAL_t
AO_test_and_set(volatile AO_TS_t *addr) {
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* Completely untested. And we should be using smaller objects anyway. */
unsigned long oldval;
unsigned long temp = 1; /* locked value */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
-
- return (AO_TS_VAL_t)oldval;
-}
-
#else
-
-AO_INLINE AO_TS_VAL_t
-AO_test_and_set(volatile AO_TS_t *addr) {
int oldval;
int temp = 1; /* locked value */
: "=&r"(oldval)
: "r"(addr), "r"(temp)
: "memory", "cr0");
-
+#endif
return (AO_TS_VAL_t)oldval;
}
-
-#endif
-
#define AO_HAVE_test_and_set
AO_INLINE AO_TS_VAL_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_lwsync();
return AO_test_and_set(addr);
}
-
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_full
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
-/* FIXME: Completely untested. */
AO_INLINE int
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
AO_t oldval;
int result = 0;
-
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* FIXME: Completely untested. */
__asm__ __volatile__(
"1:ldarx %0,0,%2\n" /* load and reserve */
"cmpd %0, %4\n" /* if load is not equal to */
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
-
- return result;
-}
-
#else
-
-AO_INLINE int
-AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
- AO_t oldval;
- int result = 0;
-
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"cmpw %0, %4\n" /* if load is not equal to */
: "=&r"(oldval), "=&r"(result)
: "r"(addr), "r"(new_val), "r"(old), "1"(result)
: "memory", "cr0");
-
+#endif
return result;
}
-#endif
-
#define AO_HAVE_compare_and_swap
AO_INLINE int
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_full
-#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
-/* FIXME: Completely untested. */
-
AO_INLINE AO_t
AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
AO_t oldval;
AO_t newval;
-
+#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
+/* FIXME: Completely untested. */
__asm__ __volatile__(
"1:ldarx %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
-
- return oldval;
-}
-
-#define AO_HAVE_fetch_and_add
-
#else
-
-AO_INLINE AO_t
-AO_fetch_and_add(volatile AO_t *addr, AO_t incr) {
- AO_t oldval;
- AO_t newval;
-
__asm__ __volatile__(
"1:lwarx %0,0,%2\n" /* load and reserve */
"add %1,%0,%3\n" /* increment */
: "=&r"(oldval), "=&r"(newval)
: "r"(addr), "r"(incr)
: "memory", "cr0");
-
+#endif
return oldval;
}
-
#define AO_HAVE_fetch_and_add
-#endif
-
AO_INLINE AO_t
AO_fetch_and_add_acquire(volatile AO_t *addr, AO_t incr) {
AO_t result = AO_fetch_and_add(addr, incr);
AO_lwsync();
return result;
}
-
#define AO_HAVE_fetch_and_add_acquire
AO_INLINE AO_t
AO_lwsync();
return AO_fetch_and_add(addr, incr);
}
-
#define AO_HAVE_fetch_and_add_release
AO_INLINE AO_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_fetch_and_add_full
#if defined(__powerpc64__) || defined(__ppc64__) || defined(__64BIT__)
/* Presumably they're cheaper than CS? */
AO_INLINE AO_t AO_compare_and_swap_full(volatile AO_t *addr,
- AO_t old, AO_t new_val)
+ AO_t old, AO_t new_val)
{
int retval;
__asm__ __volatile__ (
: "cc", "memory");
return retval == 0;
}
-
#define AO_HAVE_compare_and_swap_full
/* FIXME: Add double-wide compare-and-swap for 32-bit executables. */
: "m"(*addr) : "memory");
return oldval;
}
-
#define AO_HAVE_test_and_set_full
#ifndef AO_NO_SPARC_V9
: "memory", "cc");
return (int)ret;
}
-
#define AO_HAVE_compare_and_swap_full
-#endif /* AO_NO_SPARC_V9 */
+#endif /* !AO_NO_SPARC_V9 */
/* FIXME: This needs to be extended for SPARC v8 and v9. */
/* SPARC V8 also has swap. V9 has CAS. */
{
__asm__ __volatile__("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
#else
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
/* Really only works for 486 and later */
__asm__ __volatile__ ("lock; orl %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
: "0"(0xff), "m"(*addr) : "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
return (int)result;
# endif
}
-
#define AO_HAVE_compare_and_swap_full
/* Returns nonzero if the comparison succeeded. */
#endif
return (int) result;
}
-
#define AO_HAVE_compare_double_and_swap_double_full
#include "../ao_t_is_int.h"
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm__ __volatile__("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
/* As far as we can tell, the lfence and sfence instructions are not */
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
: "memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
__asm__ __volatile__ ("lock; orq %1, %0" :
"=m" (*p) : "r" (incr), "m" (*p) : "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
: "0"(0xff), "m"(*addr) : "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
return (int) result;
# endif
}
-
#define AO_HAVE_compare_and_swap_full
#ifdef AO_CMPXCHG16B_AVAILABLE
+
/* NEC LE-IT: older AMD Opterons are missing this instruction.
* On these machines SIGILL will be thrown.
* Define AO_WEAK_DOUBLE_CAS_EMULATION to have an emulated
return (int) result;
}
#define AO_HAVE_compare_double_and_swap_double_full
+
#else
/* this one provides spinlock based emulation of CAS implemented in */
/* atomic_ops.c. We probably do not want to do this here, since it is */
AO_t old_val1, AO_t old_val2,
AO_t new_val1, AO_t new_val2)
{
- return AO_compare_double_and_swap_double_emulation(addr,
- old_val1, old_val2,
- new_val1, new_val2);
+ return AO_compare_double_and_swap_double_emulation(addr, old_val1, old_val2,
+ new_val1, new_val2);
}
#define AO_HAVE_compare_double_and_swap_double_full
#endif /* AO_WEAK_DOUBLE_CAS_EMULATION */
+
#endif /* AO_CMPXCHG16B_AVAILABLE */
pthread_mutex_lock(&AO_pt_lock);
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_nop_full
AO_INLINE AO_t
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_load_full
AO_INLINE void
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_store_full
AO_INLINE unsigned char
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_char_load_full
AO_INLINE void
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_char_store_full
AO_INLINE unsigned short
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_short_load_full
AO_INLINE void
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_short_store_full
AO_INLINE unsigned int
pthread_mutex_unlock(&AO_pt_lock);
return result;
}
-
#define AO_HAVE_int_load_full
AO_INLINE void
*addr = val;
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_int_store_full
AO_INLINE AO_TS_VAL_t
assert(result == AO_TS_SET || result == AO_TS_CLEAR);
return result;
}
-
#define AO_HAVE_test_and_set_full
AO_INLINE AO_t
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
pthread_mutex_unlock(&AO_pt_lock);
return tmp;
}
-
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
*p = (tmp | incr);
pthread_mutex_unlock(&AO_pt_lock);
}
-
#define AO_HAVE_or_full
AO_INLINE int
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
-
#define AO_HAVE_compare_and_swap_full
/* Unlike real architectures, we define both double-width CAS variants. */
AO_t AO_val1;
AO_t AO_val2;
} AO_double_t;
-
#define AO_HAVE_double_t
AO_INLINE int
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
-
#define AO_HAVE_compare_double_and_swap_double_full
AO_INLINE int
pthread_mutex_unlock(&AO_pt_lock);
return 0;
}
-
#define AO_HAVE_compare_and_swap_double_full
/* We can't use hardware loads and stores, since they don't */
__ldcw (a, ret);
return ret;
}
+#define AO_HAVE_test_and_set_full
AO_INLINE void
AO_pa_clear(volatile AO_TS_t * addr)
*(volatile unsigned int *)a = 1;
}
#define AO_CLEAR(addr) AO_pa_clear(addr)
-
-#define AO_HAVE_test_and_set_full
return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, 1,
_LDHINT_NONE, _UP_MEM_FENCE);
}
-
#define AO_HAVE_fetch_and_add1_release
AO_INLINE AO_t
return _Asm_fetchadd(AO_T_FASIZE, _SEM_ACQ, p, -1,
_LDHINT_NONE, _DOWN_MEM_FENCE);
}
-
#define AO_HAVE_fetch_and_sub1_acquire
AO_INLINE AO_t
return _Asm_fetchadd(AO_T_FASIZE, _SEM_REL, p, -1,
_LDHINT_NONE, _UP_MEM_FENCE);
}
-
#define AO_HAVE_fetch_and_sub1_release
AO_INLINE int
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE int
/* Hopefully the compiler knows not to reorder the above two? */
return (oldval == old);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE int
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_acquire
AO_INLINE int
/* Hopefully the compiler knows not to reorder the above two? */
return (oldval == old);
}
-
#define AO_HAVE_char_compare_and_swap_release
AO_INLINE int
new_val, _LDHINT_NONE, _DOWN_MEM_FENCE);
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_acquire
AO_INLINE int
/* Hopefully the compiler knows not to reorder the above two? */
return (oldval == old);
}
-
#define AO_HAVE_short_compare_and_swap_release
#ifndef __LP64__
AO_lwsync();
return result;
}
-
#define AO_HAVE_load_acquire
AO_INLINE void
AO_lwsync();
*addr = value;
}
-
-#define AO_HAVE_load_acquire
+#define AO_HAVE_store_release
/* This is similar to the code in the garbage collector. Deleting */
/* this and having it synthesized from compare_and_swap would probably */
AO_test_and_set(volatile AO_TS_t *addr) {
# error FIXME Implement me
}
-
#define AO_HAVE_test_and_set*/
AO_INLINE AO_TS_VAL_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_acquire
AO_INLINE AO_TS_VAL_t
AO_lwsync();
return AO_test_and_set(addr);
}
-
#define AO_HAVE_test_and_set_release
AO_INLINE AO_TS_VAL_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_test_and_set_full
/*AO_INLINE AO_t
AO_compare_and_swap(volatile AO_t *addr, AO_t old, AO_t new_val) {
# error FIXME Implement me
}
-
#define AO_HAVE_compare_and_swap*/
AO_INLINE AO_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_acquire
AO_INLINE AO_t
AO_lwsync();
return AO_compare_and_swap(addr, old, new_val);
}
-
#define AO_HAVE_compare_and_swap_release
AO_INLINE AO_t
AO_lwsync();
return result;
}
-
#define AO_HAVE_compare_and_swap_full
/* FIXME: We should also implement fetch_and_add and or primitives */
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned int are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of unsigned */
+/* int are atomic for all legal alignments. */
AO_INLINE unsigned int
AO_int_load(const volatile unsigned int *addr)
/* volatile adds barrier semantics. */
return (*(unsigned int *)addr);
}
-
#define AO_HAVE_int_load
AO_INLINE void
assert(((size_t)addr & (sizeof(unsigned int) - 1)) == 0);
(*(unsigned int *)addr) = new_val;
}
-
#define AO_HAVE_int_store
* SOFTWARE.
*/
-/*
- * Definitions for architectures on which loads and stores of unsigned int are
- * atomic for all legal alignments.
- */
+/* Definitions for architectures on which loads and stores of unsigned */
+/* int are atomic for all legal alignments. */
AO_INLINE unsigned int
AO_int_load(const volatile unsigned int *addr)
/* volatile adds barrier semantics. */
return (*(const unsigned int *)addr);
}
-
#define AO_HAVE_int_load
AO_INLINE void
{
(*(unsigned int *)addr) = new_val;
}
-
#define AO_HAVE_int_store
return _InterlockedExchangeAdd((LONG AO_INTERLOCKED_VOLATILE *)p,
(LONG)incr);
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
{
return _InterlockedIncrement((LONG AO_INTERLOCKED_VOLATILE *)p) - 1;
}
-
#define AO_HAVE_fetch_and_add1_full
AO_INLINE AO_t
{
return _InterlockedDecrement((LONG AO_INTERLOCKED_VOLATILE *)p) + 1;
}
-
#define AO_HAVE_fetch_and_sub1_full
#ifdef AO_ASSUME_WINDOWS98
== (LONG)old;
# endif
}
-
# define AO_HAVE_compare_and_swap_full
#endif /* AO_ASSUME_WINDOWS98 */
{
__asm { mfence }
}
-
#define AO_HAVE_nop_full
#else
}
/* Ignore possible "missing return value" warning here. */
}
-
#define AO_HAVE_test_and_set_full
#ifdef _WIN64
#endif
#ifdef AO_ASSUME_VISTA
+
/* NEC LE-IT: whenever we run on a pentium class machine we have that
* certain function */
}
#define AO_HAVE_double_compare_and_swap_full
#endif /* __cplusplus */
+
#endif /* AO_ASSUME_VISTA */
#include "../ao_t_is_int.h"
{
return _InterlockedExchangeAdd64((LONGLONG volatile *)p, (LONGLONG)incr);
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE AO_t
{
return _InterlockedIncrement64((LONGLONG volatile *)p) - 1;
}
-
#define AO_HAVE_fetch_and_add1_full
AO_INLINE AO_t
{
return _InterlockedDecrement64((LONGLONG volatile *)p) + 1;
}
-
#define AO_HAVE_fetch_and_sub1_full
AO_INLINE int
(LONGLONG)new_val, (LONGLONG)old)
== (LONGLONG)old;
}
-
#define AO_HAVE_compare_and_swap_full
/* As far as we can tell, the lfence and sfence instructions are not */
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm { mfence }
}
-
#define AO_HAVE_nop_full
AO_INLINE AO_TS_VAL_t
xchg byte ptr [rbx],al ;
}
}
-
#define AO_HAVE_test_and_set_full
#endif /* AO_ASM_X64_AVAILABLE */
#ifdef AO_CMPXCHG16B_AVAILABLE
-
/* AO_compare_double_and_swap_double_full needs implementation for Win64.
* Also see ../gcc/x86_64.h for partial old Opteron workaround.
*/
return _InterlockedCompareExchange128((volatile __int64 *)addr,
new_val2 /* high */, new_val1 /* low */, comparandResult);
}
-
# define AO_HAVE_compare_double_and_swap_double_full
# elif defined(AO_ASM_X64_AVAILABLE)
-
- /* If there is no intrinsic _InterlockedCompareExchange128 then we
- * need basically what's given below.
- */
-
+ /* If there is no intrinsic _InterlockedCompareExchange128 then we */
+ /* need basically what's given below. */
AO_INLINE int
AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
AO_t old_val1, AO_t old_val2,
setz rax ;
}
}
-
# define AO_HAVE_compare_double_and_swap_double_full
-
# endif /* _MSC_VER >= 1500 || AO_ASM_X64_AVAILABLE */
#endif /* AO_CMPXCHG16B_AVAILABLE */
* SOFTWARE.
*/
-/*
- * These are common definitions for architectures that provide processor
- * ordered memory operations.
- */
+/* These are common definitions for architectures that provide */
+/* processor ordered memory operations. */
#include "ordered_except_wr.h"
{
AO_compiler_barrier();
}
-
#define AO_HAVE_nop_full
/* sfence according to Intel docs. Pentium 3 and up. */
/* Unnecessary for cached accesses? */
}
-
#define AO_HAVE_NOP_WRITE
#if defined(AO_HAVE_store)
-
-AO_INLINE void
-AO_store_write(volatile AO_t *addr, AO_t val)
-{
- AO_compiler_barrier();
- AO_store(addr, val);
-}
+ AO_INLINE void
+ AO_store_write(volatile AO_t *addr, AO_t val)
+ {
+ AO_compiler_barrier();
+ AO_store(addr, val);
+ }
# define AO_HAVE_store_write
# define AO_store_release(addr, val) AO_store_write(addr, val)
# define AO_HAVE_store_release
-
#endif /* AO_HAVE_store */
#if defined(AO_HAVE_char_store)
-
-AO_INLINE void
-AO_char_store_write(volatile unsigned char *addr, unsigned char val)
-{
- AO_compiler_barrier();
- AO_char_store(addr, val);
-}
+ AO_INLINE void
+ AO_char_store_write(volatile unsigned char *addr, unsigned char val)
+ {
+ AO_compiler_barrier();
+ AO_char_store(addr, val);
+ }
# define AO_HAVE_char_store_write
# define AO_char_store_release(addr, val) AO_char_store_write(addr, val)
# define AO_HAVE_char_store_release
-
#endif /* AO_HAVE_char_store */
#if defined(AO_HAVE_short_store)
-
-AO_INLINE void
-AO_short_store_write(volatile unsigned short *addr, unsigned short val)
-{
- AO_compiler_barrier();
- AO_short_store(addr, val);
-}
+ AO_INLINE void
+ AO_short_store_write(volatile unsigned short *addr, unsigned short val)
+ {
+ AO_compiler_barrier();
+ AO_short_store(addr, val);
+ }
# define AO_HAVE_short_store_write
# define AO_short_store_release(addr, val) AO_short_store_write(addr, val)
# define AO_HAVE_short_store_release
-
#endif /* AO_HAVE_short_store */
#if defined(AO_HAVE_int_store)
-
-AO_INLINE void
-AO_int_store_write(volatile unsigned int *addr, unsigned int val)
-{
- AO_compiler_barrier();
- AO_int_store(addr, val);
-}
+ AO_INLINE void
+ AO_int_store_write(volatile unsigned int *addr, unsigned int val)
+ {
+ AO_compiler_barrier();
+ AO_int_store(addr, val);
+ }
# define AO_HAVE_int_store_write
# define AO_int_store_release(addr, val) AO_int_store_write(addr, val)
# define AO_HAVE_int_store_release
-
#endif /* AO_HAVE_int_store */
{
AO_compiler_barrier();
}
-
#define AO_HAVE_NOP_READ
#ifdef AO_HAVE_load
-
-AO_INLINE AO_t
-AO_load_read(const volatile AO_t *addr)
-{
- AO_t result = AO_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_load_read
-
-#define AO_load_acquire(addr) AO_load_read(addr)
-#define AO_HAVE_load_acquire
-
+ AO_INLINE AO_t
+ AO_load_read(const volatile AO_t *addr)
+ {
+ AO_t result = AO_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_load_read
+
+# define AO_load_acquire(addr) AO_load_read(addr)
+# define AO_HAVE_load_acquire
#endif /* AO_HAVE_load */
#ifdef AO_HAVE_char_load
-
-AO_INLINE AO_t
-AO_char_load_read(const volatile unsigned char *addr)
-{
- AO_t result = AO_char_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_char_load_read
-
-#define AO_char_load_acquire(addr) AO_char_load_read(addr)
-#define AO_HAVE_char_load_acquire
-
+ AO_INLINE AO_t
+ AO_char_load_read(const volatile unsigned char *addr)
+ {
+ AO_t result = AO_char_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_char_load_read
+
+# define AO_char_load_acquire(addr) AO_char_load_read(addr)
+# define AO_HAVE_char_load_acquire
#endif /* AO_HAVE_char_load */
#ifdef AO_HAVE_short_load
-
-AO_INLINE AO_t
-AO_short_load_read(const volatile unsigned short *addr)
-{
- AO_t result = AO_short_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_short_load_read
-
-#define AO_short_load_acquire(addr) AO_short_load_read(addr)
-#define AO_HAVE_short_load_acquire
-
+ AO_INLINE AO_t
+ AO_short_load_read(const volatile unsigned short *addr)
+ {
+ AO_t result = AO_short_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_short_load_read
+
+# define AO_short_load_acquire(addr) AO_short_load_read(addr)
+# define AO_HAVE_short_load_acquire
#endif /* AO_HAVE_short_load */
#ifdef AO_HAVE_int_load
-
-AO_INLINE AO_t
-AO_int_load_read(const volatile unsigned int *addr)
-{
- AO_t result = AO_int_load(addr);
- AO_compiler_barrier();
- return result;
-}
-#define AO_HAVE_int_load_read
-
-#define AO_int_load_acquire(addr) AO_int_load_read(addr)
-#define AO_HAVE_int_load_acquire
-
+ AO_INLINE AO_t
+ AO_int_load_read(const volatile unsigned int *addr)
+ {
+ AO_t result = AO_int_load(addr);
+ AO_compiler_barrier();
+ return result;
+ }
+# define AO_HAVE_int_load_read
+
+# define AO_int_load_acquire(addr) AO_int_load_read(addr)
+# define AO_HAVE_int_load_acquire
#endif /* AO_HAVE_int_load */
/* volatile adds barrier semantics. */
return (*(unsigned short *)addr);
}
-
#define AO_HAVE_short_load
AO_INLINE void
assert(((size_t)addr & (sizeof(unsigned short) - 1)) == 0);
(*(unsigned short *)addr) = new_val;
}
-
#define AO_HAVE_short_store
/* volatile adds barrier semantics. */
return (*(const unsigned short *)addr);
}
-
#define AO_HAVE_short_load
AO_INLINE void
{
(*(unsigned short *)addr) = new_val;
}
-
#define AO_HAVE_short_store
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * SOFTWARE.
*/
#include "../all_atomic_load_store.h"
/* Real SPARC code uses TSO: */
#include "../ordered_except_wr.h"
-/* Test_and_set location is just a byte. */
+/* Test_and_set location is just a byte. */
#include "../test_and_set_t_is_char.h"
extern AO_TS_VAL_t
AO_test_and_set_full(volatile AO_TS_t *addr);
-/* Implemented in separate .S file, for now. */
-
+/* Implemented in separate .S file, for now. */
#define AO_HAVE_test_and_set_full
-/* FIXME: Like the gcc version, this needs to be extended for V8 */
-/* and V9. */
+/* FIXME: Like the gcc version, this needs to be extended for V8 */
+/* and V9. */
{
__asm__ __volatile__ ("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
#else
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
/* Really only works for 486 and later */
"=m" (*p) : "r" (incr) /* , "m" (*p) */
: "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
: "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
: "r" (new_val), "a"(old) : "memory");
return (int) result;
}
-
#define AO_HAVE_compare_and_swap_full
#if 0
#endif
return (int) result;
}
-
#define AO_HAVE_compare_double_and_swap_double_full
#endif
/* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips. */
__asm__ __volatile__ ("mfence" : : : "memory");
}
-
#define AO_HAVE_nop_full
/* As far as we can tell, the lfence and sfence instructions are not */
: "memory");
return result;
}
-
#define AO_HAVE_fetch_and_add_full
AO_INLINE unsigned char
: "memory");
return result;
}
-
#define AO_HAVE_char_fetch_and_add_full
AO_INLINE unsigned short
: "memory");
return result;
}
-
#define AO_HAVE_short_fetch_and_add_full
AO_INLINE unsigned int
: "memory");
return result;
}
-
#define AO_HAVE_int_fetch_and_add_full
AO_INLINE void
"=m" (*p) : "r" (incr) /* , "m" (*p) */
: "memory");
}
-
#define AO_HAVE_or_full
AO_INLINE AO_TS_VAL_t
: "memory");
return (AO_TS_VAL_t)oldval;
}
-
#define AO_HAVE_test_and_set_full
/* Returns nonzero if the comparison succeeded. */
: "r" (new_val), "a"(old) : "memory");
return (int) result;
}
-
#define AO_HAVE_compare_and_swap_full
#ifdef AO_CMPXCHG16B_AVAILABLE
/*
* Copyright (c) 2004 Hewlett-Packard Development Company, L.P.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
- *
+ *
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
+ * SOFTWARE.
+ */
/*
* These are common definitions for architectures on which test_and_set
#define AO_TS_SET AO_BYTE_TS_set
#define AO_CHAR_TS_T 1
-
-
-