From 3e195d9371956446c8182af812723300010f0bb8 Mon Sep 17 00:00:00 2001
From: Roland McGrath
Date: Tue, 25 Mar 2003 22:40:21 +0000
Subject: [PATCH] 2003-03-25  Roland McGrath

	* sysdeps/powerpc/bits/atomic.h (__arch_atomic_exchange_32):
	New macro.
	(__arch_atomic_exchange_64): New macro.
	(atomic_exchange): Use them.
	(__arch_atomic_exchange_and_add_32): New macro.
	(__arch_atomic_exchange_and_add_64): New macro.
	(atomic_exchange_and_add): Use them.
	Original patch from Steven Munroe.
---
 sysdeps/powerpc/bits/atomic.h | 99 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 75 insertions(+), 24 deletions(-)

diff --git a/sysdeps/powerpc/bits/atomic.h b/sysdeps/powerpc/bits/atomic.h
index 49f1c14..e0f2bd4 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -102,38 +102,90 @@ typedef uintmax_t uatomic_max_t;
     __tmp != 0; \
   })
 
+# define __arch_atomic_exchange_64(mem, value) \
+  ({ \
+    __typeof (*mem) __val; \
+    __asm __volatile (__ARCH_REL_INSTR "\n" \
+                      "1: ldarx  %0,0,%2\n" \
+                      "   stdcx. %3,0,%2\n" \
+                      "   bne-   1b" \
+                      : "=&r" (__val), "=m" (*mem) \
+                      : "r" (mem), "r" (value), "1" (*mem) \
+                      : "cr0"); \
+    __val; \
+  })
+
+# define __arch_atomic_exchange_and_add_64(mem, value) \
+  ({ \
+    __typeof (*mem) __val, __tmp; \
+    __asm __volatile ("1: ldarx  %0,0,%3\n" \
+                      "   addi   %1,%0,%4\n" \
+                      "   stdcx. %1,0,%3\n" \
+                      "   bne-   1b" \
+                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+                      : "r" (mem), "I" (value), "2" (*mem) \
+                      : "cr0"); \
+    __val; \
+  })
+
 #else /* powerpc32 */
 # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
   (abort (), 0)
+
+# define __arch_atomic_exchange_64(mem, value) \
+  ({ abort (); (*mem) = (value); })
+# define __arch_atomic_exchange_and_add_64(mem, value) \
+  ({ abort (); (*mem) = (value); })
 #endif
 
+#define __arch_atomic_exchange_32(mem, value) \
+  ({ \
+    __typeof (*mem) __val; \
+    __asm __volatile (__ARCH_REL_INSTR "\n" \
+                      "1: lwarx  %0,0,%2\n" \
+                      "   stwcx. %3,0,%2\n" \
+                      "   bne-   1b" \
+                      : "=&r" (__val), "=m" (*mem) \
+                      : "r" (mem), "r" (value), "1" (*mem) \
+                      : "cr0"); \
+    __val; \
+  })
 
-#define atomic_exchange(mem, value) \
-  ({ if (sizeof (*mem) != 4) \
-       abort (); \
-     int __val; \
-     __asm __volatile (__ARCH_REL_INSTR "\n" \
-                       "1: lwarx  %0,0,%2\n" \
-                       "   stwcx. %3,0,%2\n" \
-                       "   bne-   1b" \
-                       : "=&r" (__val), "=m" (*mem) \
-                       : "r" (mem), "r" (value), "1" (*mem) \
-                       : "cr0"); \
-     __val; })
+#define __arch_atomic_exchange_and_add_32(mem, value) \
+  ({ \
+    __typeof (*mem) __val, __tmp; \
+    __asm __volatile ("1: lwarx  %0,0,%3\n" \
+                      "   addi   %1,%0,%4\n" \
+                      "   stwcx. %1,0,%3\n" \
+                      "   bne-   1b" \
+                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
+                      : "r" (mem), "I" (value), "2" (*mem) \
+                      : "cr0"); \
+    __val; \
+  })
 
+#define atomic_exchange(mem, value) \
+  ({ \
+    __typeof (*(mem)) __result; \
+    if (sizeof (*mem) == 4) \
+      __result = __arch_atomic_exchange_32 ((mem), (value)); \
+    else if (sizeof (*mem) == 8) \
+      __result = __arch_atomic_exchange_64 ((mem), (value)); \
+    else \
+      abort (); \
+    __result; \
+  })
 
-#define atomic_exchange_and_add(mem, value) \
-  ({ if (sizeof (*mem) != 4) \
+#define atomic_exchange_and_add(mem, value) \
+  ({ \
+    __typeof (*(mem)) __result; \
+    if (sizeof (*mem) == 4) \
+      __result = __arch_atomic_exchange_and_add_32 ((mem), (value)); \
+    else if (sizeof (*mem) == 8) \
+      __result = __arch_atomic_exchange_and_add_64 ((mem), (value)); \
+    else \
       abort (); \
-     int __val, __tmp; \
-     __asm __volatile ("1: lwarx  %0,0,%3\n" \
-                       "   addi   %1,%0,%4\n" \
-                       "   stwcx. %1,0,%3\n" \
-                       "   bne-   1b" \
-                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem) \
-                       : "r" (mem), "I" (value), "2" (*mem) \
-                       : "cr0"); \
-     __val; \
+    __result; \
   })
 
@@ -156,7 +208,6 @@ typedef uintmax_t uatomic_max_t;
   })
 
 
-
 #define atomic_full_barrier()	__asm ("sync" ::: "memory")
 #ifdef __powerpc64__
 # define atomic_read_barrier()	__asm ("lwsync" ::: "memory")
-- 
2.7.4
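
Note on the technique (commentary, not part of the patch): every new
macro above is built on the PowerPC load-reserved/store-conditional
pair.  lwarx (ldarx for 64-bit doublewords) loads the operand and
places a reservation on its address; stwcx. (stdcx.) performs the
store only if the reservation is still held, recording the outcome in
cr0; and bne- loops back to retry when another processor has broken
the reservation.  The sketch below restates
__arch_atomic_exchange_and_add_32 as a plain function.  The function
name is hypothetical, and it uses add with a register operand where
the macro uses addi with an "I" (16-bit signed immediate) constraint,
so it also accepts runtime addends; otherwise the loop is the same.

/* Minimal sketch of the lwarx/stwcx. retry loop; powerpc-only.  */
static inline int
__atomic_exchange_and_add_32_sketch (int *mem, int value)
{
  int old, tmp;
  __asm __volatile ("1: lwarx  %0,0,%3\n"   /* old = *mem; reserve address.  */
                    "   add    %1,%0,%4\n"  /* tmp = old + value.  */
                    "   stwcx. %1,0,%3\n"   /* Store tmp if still reserved.  */
                    "   bne-   1b"          /* Reservation lost: retry.  */
                    : "=&b" (old), "=&r" (tmp), "=m" (*mem)
                    : "r" (mem), "r" (value), "2" (*mem)
                    : "cr0");
  return old;  /* Value of *mem before the add, like the macro.  */
}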
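
And a minimal usage sketch, assuming the patched header is in scope
via glibc's internal <atomic.h>; the variables and function here are
illustrative only:

#include <atomic.h>
#include <stdint.h>

void
example (void)
{
  static uint32_t lock = 1;
  static uint64_t counter;

  /* 4-byte operand: dispatches to __arch_atomic_exchange_32 and
     returns the previous value of lock.  */
  uint32_t old = atomic_exchange (&lock, 0);

  /* 8-byte operand: dispatches to __arch_atomic_exchange_and_add_64
     on powerpc64; on powerpc32 the stub calls abort ().  */
  uint64_t prev = atomic_exchange_and_add (&counter, 1);

  (void) old;
  (void) prev;
}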