* sysdeps/powerpc/bits/atomic.h
author    Ulrich Drepper <drepper@redhat.com>
Mon, 26 Mar 2007 20:16:39 +0000 (20:16 +0000)
committer Ulrich Drepper <drepper@redhat.com>
Mon, 26 Mar 2007 20:16:39 +0000 (20:16 +0000)
[!MUTEX_HINT_ACQ]: Define MUTEX_HINT_ACQ.
[!MUTEX_HINT_REL]: Define MUTEX_HINT_REL.
(__arch_compare_and_exchange_val_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_val_32_rel): Add MUTEX_HINT_REL to lwarx.
(__arch_atomic_exchange_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_atomic_exchange_32_rel): Add MUTEX_HINT_REL to lwarx.
* sysdeps/powerpc/powerpc32/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
(__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
* sysdeps/powerpc/powerpc64/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
(__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
(__arch_compare_and_exchange_bool_64_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_bool_64_rel): Add MUTEX_HINT_REL to lwarx.
(__arch_compare_and_exchange_val_64_acq): Add MUTEX_HINT_ACQ to lwarx.
(__arch_compare_and_exchange_val_64_rel): Add MUTEX_HINT_REL to lwarx.
(__arch_atomic_exchange_64_acq): Add MUTEX_HINT_ACQ to ldarx.
(__arch_atomic_exchange_64_rel): Add MUTEX_HINT_REL to ldarx.

2007-03-20  Jakub Jelinek  <jakub@redhat.com>

ChangeLog
nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
sysdeps/powerpc/bits/atomic.h
sysdeps/powerpc/powerpc32/bits/atomic.h
sysdeps/powerpc/powerpc64/bits/atomic.h
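
For context: the "mutex hint" is the optional EH (exclusive access hint)
operand that POWER6 adds to lwarx/ldarx, and the MUTEX_HINT_* macros splice
it into the asm templates by plain C string concatenation.  A minimal sketch
of the expansion (illustrative only, not part of this patch):

    #if defined _ARCH_PWR6 || defined _ARCH_PWR6X
    # define MUTEX_HINT_ACQ ",1"  /* expect more stores to the lock word */
    # define MUTEX_HINT_REL ",0"  /* no further stores expected */
    #else
    # define MUTEX_HINT_ACQ       /* pre-POWER6: plain lwarx/ldarx */
    # define MUTEX_HINT_REL
    #endif

    /* String pasting turns
         "1:  lwarx  %0,0,%2" MUTEX_HINT_ACQ "\n"
       into "1:  lwarx  %0,0,%2,1\n" on POWER6 builds, and leaves the
       original "1:  lwarx  %0,0,%2\n" everywhere else.  */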

index 1a5f369..5774d4c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,4 +1,28 @@
-007-03-20  Jakub Jelinek  <jakub@redhat.com>
+2007-03-19  Steven Munroe  <sjmunroe@us.ibm.com>
+
+       * sysdeps/powerpc/bits/atomic.h
+       [!MUTEX_HINT_ACQ]: Define MUTEX_HINT_ACQ.
+       [!MUTEX_HINT_REL]: Define MUTEX_HINT_REL.
+       (__arch_compare_and_exchange_val_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+       (__arch_compare_and_exchange_val_32_rel): Add MUTEX_HINT_REL to lwarx.
+       (__arch_atomic_exchange_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+       (__arch_atomic_exchange_32_rel): Add MUTEX_HINT_REL to lwarx.
+       * sysdeps/powerpc/powerpc32/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
+       Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
+       (__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+       (__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
+       * sysdeps/powerpc/powerpc64/bits/atomic.h [_ARCH_PWR6 || _ARCH_PWR6X]:
+       Define MUTEX_HINT_ACQ as ",1" and MUTEX_HINT_REL as ",0".
+       (__arch_compare_and_exchange_bool_32_acq): Add MUTEX_HINT_ACQ to lwarx.
+       (__arch_compare_and_exchange_bool_32_rel): Add MUTEX_HINT_REL to lwarx.
+       (__arch_compare_and_exchange_bool_64_acq): Add MUTEX_HINT_ACQ to lwarx.
+       (__arch_compare_and_exchange_bool_64_rel): Add MUTEX_HINT_REL to lwarx.
+       (__arch_compare_and_exchange_val_64_acq): Add MUTEX_HINT_ACQ to lwarx.
+       (__arch_compare_and_exchange_val_64_rel): Add MUTEX_HINT_REL to lwarx.
+       (__arch_atomic_exchange_64_acq): Add MUTEX_HINT_ACQ to ldarx.
+       (__arch_atomic_exchange_64_rel): Add MUTEX_HINT_REL to ldarx.
+
+2007-03-20  Jakub Jelinek  <jakub@redhat.com>
 
        * sysdeps/unix/sysv/linux/powerpc/libc-start.c
        (__cache_line_size): Define the variable here.  Add
index 0136b97..239f4dd 100644
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/lowlevellock.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004, 2006 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2006, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
@@ -25,7 +25,6 @@
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
 
-
 #ifndef __NR_futex
 # define __NR_futex            221
 #endif
 /* Set *futex to ID if it is 0, atomically.  Returns the old value */
 #define __lll_robust_trylock(futex, id) \
   ({ int __val;                                                                      \
-     __asm __volatile ("1:     lwarx   %0,0,%2\n"                            \
+     __asm __volatile ("1:     lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"          \
                       "        cmpwi   0,%0,0\n"                             \
                       "        bne     2f\n"                                 \
                       "        stwcx.  %3,0,%2\n"                            \
index 31f27e9..d71f64e 100644
--- a/sysdeps/powerpc/bits/atomic.h
+++ b/sysdeps/powerpc/bits/atomic.h
@@ -70,6 +70,13 @@ typedef uintmax_t uatomic_max_t;
 # endif
 #endif
 
+#ifndef MUTEX_HINT_ACQ
+# define MUTEX_HINT_ACQ
+#endif
+#ifndef MUTEX_HINT_REL
+# define MUTEX_HINT_REL
+#endif
+
 #define atomic_full_barrier()  __asm ("sync" ::: "memory")
 #define atomic_write_barrier() __asm ("eieio" ::: "memory")
 
@@ -78,7 +85,7 @@ typedef uintmax_t uatomic_max_t;
       __typeof (*(mem)) __tmp;                                               \
       __typeof (mem)  __memp = (mem);                                        \
       __asm __volatile (                                                     \
-                       "1:     lwarx   %0,0,%1\n"                            \
+                       "1:     lwarx   %0,0,%1" MUTEX_HINT_ACQ "\n"          \
                        "       cmpw    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stwcx.  %3,0,%1\n"                            \
@@ -95,7 +102,7 @@ typedef uintmax_t uatomic_max_t;
       __typeof (*(mem)) __tmp;                                               \
       __typeof (mem)  __memp = (mem);                                        \
       __asm __volatile (__ARCH_REL_INSTR "\n"                                \
-                       "1:     lwarx   %0,0,%1\n"                            \
+                       "1:     lwarx   %0,0,%1" MUTEX_HINT_REL "\n"          \
                        "       cmpw    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stwcx.  %3,0,%1\n"                            \
@@ -111,7 +118,7 @@ typedef uintmax_t uatomic_max_t;
   ({                                                                         \
     __typeof (*mem) __val;                                                   \
     __asm __volatile (                                                       \
-                     "1:       lwarx   %0,0,%2\n"                            \
+                     "1:       lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"          \
                      "         stwcx.  %3,0,%2\n"                            \
                      "         bne-    1b\n"                                 \
                      "   " __ARCH_ACQ_INSTR                                  \
@@ -125,7 +132,7 @@ typedef uintmax_t uatomic_max_t;
   ({                                                                         \
     __typeof (*mem) __val;                                                   \
     __asm __volatile (__ARCH_REL_INSTR "\n"                                  \
-                     "1:       lwarx   %0,0,%2\n"                            \
+                     "1:       lwarx   %0,0,%2" MUTEX_HINT_REL "\n"          \
                      "         stwcx.  %3,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&r" (__val), "=m" (*mem)                            \
index 6fcc669..62cf991 100644
--- a/sysdeps/powerpc/powerpc32/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc32/bits/atomic.h
@@ -1,5 +1,5 @@
 /* Atomic operations.  PowerPC32 version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+/*  POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
+    This is a hint to the hardware to expect additional updates adjacent
+    to the lock word or not.  If we are acquiring a Mutex, the hint
+    should be true.  Otherwise we are releasing a Mutex or doing a simple
+    atomic operation.  In that case we don't expect additional updates
+    adjacent to the lock word after the Store Conditional and the hint
+    should be false.  */
+
+#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
+# define MUTEX_HINT_ACQ        ",1"
+# define MUTEX_HINT_REL        ",0"
+#else
+# define MUTEX_HINT_ACQ
+# define MUTEX_HINT_REL
+#endif
+
 /*
  * The 32-bit exchange_bool is different on powerpc64 because the subf
 * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
  * (a load word and zero (high 32) form).  So powerpc64 has a slightly
  * different version in sysdeps/powerpc/powerpc64/bits/atomic.h.
  */
-# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)         \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)         \
 ({                                                                           \
   unsigned int __tmp;                                                        \
   __asm __volatile (                                                         \
-                   "1: lwarx   %0,0,%1\n"                                    \
+                   "1: lwarx   %0,0,%1" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %3,0,%1\n"                                    \
   __tmp != 0;                                                                \
 })
 
-# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)        \
+#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)         \
 ({                                                                           \
   unsigned int __tmp;                                                        \
   __asm __volatile (__ARCH_REL_INSTR "\n"                                    \
-                   "1: lwarx   %0,0,%1\n"                                    \
+                   "1: lwarx   %0,0,%1" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %3,0,%1\n"                                    \
 /* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
    load and reserve (ldarx) and store conditional (stdcx.) instructions.
    So for powerpc32 we stub out the 64-bit forms.  */
-# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
   (abort (), 0)
 
-# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
 
-# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
   (abort (), 0)
 
-# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
   (abort (), (__typeof (*mem)) 0)
 
-# define __arch_atomic_exchange_64_acq(mem, value) \
+#define __arch_atomic_exchange_64_acq(mem, value) \
     ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_exchange_64_rel(mem, value) \
+#define __arch_atomic_exchange_64_rel(mem, value) \
     ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_exchange_and_add_64(mem, value) \
+#define __arch_atomic_exchange_and_add_64(mem, value) \
     ({ abort (); (*mem) = (value); })
 
-# define __arch_atomic_increment_val_64(mem) \
+#define __arch_atomic_increment_val_64(mem) \
     ({ abort (); (*mem)++; })
 
-# define __arch_atomic_decrement_val_64(mem) \
+#define __arch_atomic_decrement_val_64(mem) \
     ({ abort (); (*mem)--; })
 
-# define __arch_atomic_decrement_if_positive_64(mem) \
+#define __arch_atomic_decrement_if_positive_64(mem) \
     ({ abort (); (*mem)--; })
 
 #ifdef _ARCH_PWR4
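
Putting the pieces together, a self-contained sketch of a hinted acquire
compare-and-swap in the style of the macros above (illustrative name;
assumes GCC on 32-bit PowerPC, and the ",1" form needs a POWER6-aware
assembler, e.g. -mcpu=power6):

    #if defined _ARCH_PWR6 || defined _ARCH_PWR6X
    # define MUTEX_HINT_ACQ ",1"
    #else
    # define MUTEX_HINT_ACQ
    #endif

    /* If *mem == oldval, store newval; return the previous *mem.  */
    static inline unsigned int
    cas32_acq (unsigned int *mem, unsigned int newval, unsigned int oldval)
    {
      unsigned int tmp;
      __asm __volatile ("1: lwarx   %0,0,%1" MUTEX_HINT_ACQ "\n"
                        "   cmpw    %0,%2\n"
                        "   bne     2f\n"
                        "   stwcx.  %3,0,%1\n"
                        "   bne-    1b\n"        /* reservation lost: retry */
                        "2: isync"               /* acquire barrier */
                        : "=&r" (tmp)
                        : "b" (mem), "r" (oldval), "r" (newval)
                        : "cr0", "memory");
      return tmp;
    }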
index e46dc1e..3465bb3 100644
--- a/sysdeps/powerpc/powerpc64/bits/atomic.h
+++ b/sysdeps/powerpc/powerpc64/bits/atomic.h
@@ -1,5 +1,5 @@
 /* Atomic operations.  PowerPC64 version.
-   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+   Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Paul Mackerras <paulus@au.ibm.com>, 2003.
 
    Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
    02111-1307 USA.  */
 
+/*  POWER6 adds a "Mutex Hint" to the Load and Reserve instruction.
+    This is a hint to the hardware to expect additional updates adjacent
+    to the lock word or not.  If we are acquiring a Mutex, the hint
+    should be true.  Otherwise we are releasing a Mutex or doing a simple
+    atomic operation.  In that case we don't expect additional updates
+    adjacent to the lock word after the Store Conditional and the hint
+    should be false.  */
+
+#if defined _ARCH_PWR6 || defined _ARCH_PWR6X
+# define MUTEX_HINT_ACQ        ",1"
+# define MUTEX_HINT_REL        ",0"
+#else
+# define MUTEX_HINT_ACQ
+# define MUTEX_HINT_REL
+#endif
+
 /* The 32-bit exchange_bool is different on powerpc64 because the subf
    does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
    (a load word and zero (high 32) form) load.
    In powerpc64 register values are 64-bit by default, including oldval.
    The value in oldval may have unknown sign extension, while lwarx loads
    the 32-bit value as unsigned.  So we explicitly clear the high 32 bits
    in oldval.  */
-# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
 ({                                                                           \
   unsigned int __tmp, __tmp2;                                                \
   __asm __volatile ("   clrldi  %1,%1,32\n"                                  \
-                   "1: lwarx   %0,0,%2\n"                                    \
+                   "1: lwarx   %0,0,%2" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
   __tmp != 0;                                                                \
 })
 
-# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
 ({                                                                           \
   unsigned int __tmp, __tmp2;                                                \
   __asm __volatile (__ARCH_REL_INSTR "\n"                                    \
                    "   clrldi  %1,%1,32\n"                                   \
-                   "1: lwarx   %0,0,%2\n"                                    \
+                   "1: lwarx   %0,0,%2" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
  * and Store doubleword conditional indexed (stdcx) instructions.  So here
  * we define the 64-bit forms.
  */
-# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
 ({                                                                           \
   unsigned long        __tmp;                                                        \
   __asm __volatile (                                                         \
-                   "1: ldarx   %0,0,%1\n"                                    \
+                   "1: ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
   __tmp != 0;                                                                \
 })
 
-# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
+#define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
 ({                                                                           \
   unsigned long        __tmp;                                                        \
   __asm __volatile (__ARCH_REL_INSTR "\n"                                    \
-                   "1: ldarx   %0,0,%1\n"                                    \
+                   "1: ldarx   %0,0,%1" MUTEX_HINT_REL "\n"                  \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
       __typeof (*(mem)) __tmp;                                               \
       __typeof (mem)  __memp = (mem);                                        \
       __asm __volatile (                                                     \
-                       "1:     ldarx   %0,0,%1\n"                            \
+                       "1:     ldarx   %0,0,%1" MUTEX_HINT_ACQ "\n"          \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
       __typeof (*(mem)) __tmp;                                               \
       __typeof (mem)  __memp = (mem);                                        \
       __asm __volatile (__ARCH_REL_INSTR "\n"                                \
-                       "1:     ldarx   %0,0,%1\n"                            \
+                       "1:     ldarx   %0,0,%1" MUTEX_HINT_REL "\n"          \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
       __tmp;                                                                 \
   })
 
-# define __arch_atomic_exchange_64_acq(mem, value) \
+#define __arch_atomic_exchange_64_acq(mem, value) \
     ({                                                                       \
       __typeof (*mem) __val;                                                 \
       __asm __volatile (__ARCH_REL_INSTR "\n"                                \
-                       "1:     ldarx   %0,0,%2\n"                            \
+                       "1:     ldarx   %0,0,%2" MUTEX_HINT_ACQ "\n"          \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b\n"                                 \
                  " " __ARCH_ACQ_INSTR                                        \
       __val;                                                                 \
     })
 
-# define __arch_atomic_exchange_64_rel(mem, value) \
+#define __arch_atomic_exchange_64_rel(mem, value) \
     ({                                                                       \
       __typeof (*mem) __val;                                                 \
       __asm __volatile (__ARCH_REL_INSTR "\n"                                \
-                       "1:     ldarx   %0,0,%2\n"                            \
+                       "1:     ldarx   %0,0,%2" MUTEX_HINT_REL "\n"          \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&r" (__val), "=m" (*mem)                          \
       __val;                                                                 \
     })
 
-# define __arch_atomic_exchange_and_add_64(mem, value) \
+#define __arch_atomic_exchange_and_add_64(mem, value) \
     ({                                                                       \
       __typeof (*mem) __val, __tmp;                                          \
       __asm __volatile ("1:    ldarx   %0,0,%3\n"                            \
       __val;                                                                 \
     })
 
-# define __arch_atomic_increment_val_64(mem) \
+#define __arch_atomic_increment_val_64(mem) \
     ({                                                                       \
       __typeof (*(mem)) __val;                                               \
       __asm __volatile ("1:    ldarx   %0,0,%2\n"                            \
       __val;                                                                 \
     })
 
-# define __arch_atomic_decrement_val_64(mem) \
+#define __arch_atomic_decrement_val_64(mem) \
     ({                                                                       \
       __typeof (*(mem)) __val;                                               \
       __asm __volatile ("1:    ldarx   %0,0,%2\n"                            \
       __val;                                                                 \
     })
 
-# define __arch_atomic_decrement_if_positive_64(mem) \
+#define __arch_atomic_decrement_if_positive_64(mem) \
   ({ int __val, __tmp;                                                       \
      __asm __volatile ("1:     ldarx   %0,0,%3\n"                            \
                       "        cmpdi   0,%0,0\n"                             \
 /*
  * All powerpc64 processors support the new "light weight"  sync (lwsync).
  */
-# define atomic_read_barrier() __asm ("lwsync" ::: "memory")
+#define atomic_read_barrier()  __asm ("lwsync" ::: "memory")
 /*
  * "light weight" sync can also be used for the release barrier.
  */
-# ifndef UP
-#  define __ARCH_REL_INSTR     "lwsync"
-# endif
+#ifndef UP
+# define __ARCH_REL_INSTR      "lwsync"
+#endif
 
 /*
  * Include the rest of the atomic ops macros which are common to both
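
Downstream, these arch macros back glibc's generic <atomic.h> entry points,
so callers never spell lwarx/ldarx directly.  A usage sketch with the
glibc-internal API (lock protocol simplified for illustration; with_lock
and lock_word are hypothetical names):

    #include <atomic.h>

    static int lock_word;       /* 0 = free, 1 = held */

    void
    with_lock (void)
    {
      /* Expands to the MUTEX_HINT_ACQ'd lwarx/stwcx. loop on POWER6;
         returns the previous value, so 0 means we took the lock.  */
      while (atomic_compare_and_exchange_val_acq (&lock_word, 1, 0) != 0)
        ;       /* spin -- a real lock would futex-wait */

      /* ... critical section ... */

      /* Release: lwsync (__ARCH_REL_INSTR) then the ",0"-hinted loop.  */
      atomic_exchange_rel (&lock_word, 0);
    }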