From afce5aeeb99c12f40438bf75536323a5cd81b0b7 Mon Sep 17 00:00:00 2001
From: ivmai
Date: Wed, 1 Jun 2011 13:57:18 +0000
Subject: [PATCH] 2011-06-01 Ivan Maidanski

        * src/atomic_ops/sysdeps/sunc/x86.h (AO_test_and_set_full):
        Specify "%b0" (instead of "%0") in "xchg" instruction (to
        workaround a bug in Sun C 5.11).
        * src/atomic_ops/sysdeps/sunc/x86_64.h (AO_test_and_set_full):
        Ditto.

---
 ChangeLog                            |  8 ++++++++
 src/atomic_ops/sysdeps/sunc/x86.h    | 21 ++++++++++-----------
 src/atomic_ops/sysdeps/sunc/x86_64.h | 35 +++++++++++++++++------------------
 3 files changed, 35 insertions(+), 29 deletions(-)

diff --git a/ChangeLog b/ChangeLog
index e241328..ad7e863 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,13 @@
 2011-06-01 Ivan Maidanski
 
+        * src/atomic_ops/sysdeps/sunc/x86.h (AO_test_and_set_full):
+        Specify "%b0" (instead of "%0") in "xchg" instruction (to
+        workaround a bug in Sun C 5.11).
+        * src/atomic_ops/sysdeps/sunc/x86_64.h (AO_test_and_set_full):
+        Ditto.
+
+2011-06-01 Ivan Maidanski
+
         * src/atomic_ops_malloc.c (AO_malloc_enable_mmap): Workaround
         for Sun C compiler (call "release" variant of AO_store).
 
diff --git a/src/atomic_ops/sysdeps/sunc/x86.h b/src/atomic_ops/sysdeps/sunc/x86.h
index 5086e84..e1e054a 100644
--- a/src/atomic_ops/sysdeps/sunc/x86.h
+++ b/src/atomic_ops/sysdeps/sunc/x86.h
@@ -37,7 +37,7 @@
 AO_INLINE void
 AO_nop_full(void)
 {
-  __asm__ __volatile__("mfence" : : : "memory");
+  __asm__ __volatile__ ("mfence" : : : "memory");
 }
 #define AO_HAVE_nop_full
 
@@ -105,14 +105,14 @@ AO_or_full (volatile AO_t *p, AO_t incr)
 #define AO_HAVE_or_full
 
 AO_INLINE AO_TS_VAL_t
-AO_test_and_set_full(volatile AO_TS_t *addr)
+AO_test_and_set_full (volatile AO_TS_t *addr)
 {
   AO_TS_t oldval;
   /* Note: the "xchg" instruction does not need a "lock" prefix */
-  __asm__ __volatile__("xchg %0, %1"
-                : "=q"(oldval), "=m"(*addr)
-                : "0"(0xff) /* , "m"(*addr) */
-                : "memory");
+  __asm__ __volatile__ ("xchg %b0, %1"
+                        : "=q"(oldval), "=m"(*addr)
+                        : "0"(0xff) /* , "m"(*addr) */
+                        : "memory");
   return (AO_TS_VAL_t)oldval;
 }
 
@@ -120,13 +120,12 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
 
 /* Returns nonzero if the comparison succeeded. */
 AO_INLINE int
-AO_compare_and_swap_full(volatile AO_t *addr,
-                         AO_t old, AO_t new_val)
+AO_compare_and_swap_full (volatile AO_t *addr, AO_t old, AO_t new_val)
 {
   char result;
-  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-                : "=m"(*addr), "=a"(result)
-                : "r" (new_val), "a"(old) : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgl %2, %0; setz %1"
+                        : "=m"(*addr), "=a"(result)
+                        : "r" (new_val), "a"(old) : "memory");
   return (int) result;
 }
 
diff --git a/src/atomic_ops/sysdeps/sunc/x86_64.h b/src/atomic_ops/sysdeps/sunc/x86_64.h
index ac5f51d..4d47c2f 100644
--- a/src/atomic_ops/sysdeps/sunc/x86_64.h
+++ b/src/atomic_ops/sysdeps/sunc/x86_64.h
@@ -35,7 +35,7 @@
 AO_INLINE void
 AO_nop_full(void)
 {
   /* Note: "mfence" (SSE2) is supported on all x86_64/amd64 chips.     */
-  __asm__ __volatile__("mfence" : : : "memory");
+  __asm__ __volatile__ ("mfence" : : : "memory");
 }
 #define AO_HAVE_nop_full
 
@@ -106,14 +106,14 @@ AO_or_full (volatile AO_t *p, AO_t incr)
 #define AO_HAVE_or_full
 
 AO_INLINE AO_TS_VAL_t
-AO_test_and_set_full(volatile AO_TS_t *addr)
+AO_test_and_set_full (volatile AO_TS_t *addr)
 {
   AO_TS_t oldval;
   /* Note: the "xchg" instruction does not need a "lock" prefix */
-  __asm__ __volatile__("xchg %0, %1"
-                : "=q"(oldval), "=m"(*addr)
-                : "0"(0xff) /* , "m"(*addr) */
-                : "memory");
+  __asm__ __volatile__ ("xchg %b0, %1"
+                        : "=q"(oldval), "=m"(*addr)
+                        : "0"(0xff) /* , "m"(*addr) */
+                        : "memory");
   return (AO_TS_VAL_t)oldval;
 }
 
@@ -121,13 +121,12 @@ AO_test_and_set_full(volatile AO_TS_t *addr)
 
 /* Returns nonzero if the comparison succeeded. */
 AO_INLINE int
-AO_compare_and_swap_full(volatile AO_t *addr,
-                         AO_t old, AO_t new_val)
+AO_compare_and_swap_full (volatile AO_t *addr, AO_t old, AO_t new_val)
 {
   char result;
-  __asm__ __volatile__("lock; cmpxchgq %2, %0; setz %1"
-                : "=m"(*addr), "=a"(result)
-                : "r" (new_val), "a"(old) : "memory");
+  __asm__ __volatile__ ("lock; cmpxchgq %2, %0; setz %1"
+                        : "=m"(*addr), "=a"(result)
+                        : "r" (new_val), "a"(old) : "memory");
   return (int) result;
 }
 
@@ -144,15 +143,15 @@ AO_compare_and_swap_full(volatile AO_t *addr,
  * Hoewever both are clearly useful in certain cases.
  */
 AO_INLINE int
-AO_compare_double_and_swap_double_full(volatile AO_double_t *addr,
-                                       AO_t old_val1, AO_t old_val2,
-                                       AO_t new_val1, AO_t new_val2)
+AO_compare_double_and_swap_double_full (volatile AO_double_t *addr,
+                                        AO_t old_val1, AO_t old_val2,
+                                        AO_t new_val1, AO_t new_val2)
 {
   char result;
-  __asm__ __volatile__("lock; cmpxchg16b %0; setz %1"
-                : "=m"(*addr), "=a"(result)
-                : "m"(*addr), "d" (old_val2), "a" (old_val1),
-                  "c" (new_val2), "b" (new_val1) : "memory");
+  __asm__ __volatile__ ("lock; cmpxchg16b %0; setz %1"
+                        : "=m"(*addr), "=a"(result)
+                        : "m"(*addr), "d" (old_val2), "a" (old_val1),
+                          "c" (new_val2), "b" (new_val1) : "memory");
   return (int) result;
 }
 #define AO_HAVE_compare_double_and_swap_double_full
-- 
2.7.4
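
Editor's note: the sketch below is not part of the patch; it is a minimal standalone
illustration of the operand-modifier change the patch makes. The type my_ts_t, the
function my_test_and_set_full, and the GCC-compatible inline-asm dialect are assumptions
for illustration only. The point is that AO_TS_t is a one-byte location on x86, so the
register operand of "xchg" must be spelled as a byte subregister: "%b0" makes the
compiler emit, e.g., %al rather than %eax, which is what the commit message says
Sun C 5.11 gets wrong when the plain "%0" form is used.

/* Standalone sketch (hypothetical names), compile with gcc/cc on x86 or x86_64. */
#include <stdio.h>

typedef unsigned char my_ts_t;   /* stand-in for the one-byte AO_TS_t */

static my_ts_t
my_test_and_set_full (volatile my_ts_t *addr)
{
  my_ts_t oldval;
  /* "xchg" with a memory operand is implicitly locked, so no "lock" prefix.   */
  /* "%b0" forces the byte name of the register chosen for operand 0 (%al).    */
  __asm__ __volatile__ ("xchg %b0, %1"
                        : "=q"(oldval), "=m"(*addr)
                        : "0"((my_ts_t)0xff)
                        : "memory");
  return oldval;
}

int main(void)
{
  volatile my_ts_t lock = 0;
  /* First call should return 0 (lock was clear), second should return 255. */
  printf("first acquire:  %u\n", (unsigned)my_test_and_set_full(&lock));
  printf("second acquire: %u\n", (unsigned)my_test_and_set_full(&lock));
  printf("lock byte now:  %u\n", (unsigned)lock);
  return 0;
}

If the "xchg" line is changed back to "%0", a compiler that substitutes the full-width
register name for a byte-sized exchange would produce an operand-size mismatch, which is
the failure mode the patch works around.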