* sysdeps/alpha/Makefile: New file.
author    Richard Henderson <rth@redhat.com>
          Tue, 13 Jan 2004 09:36:22 +0000 (09:36 +0000)
committer Richard Henderson <rth@redhat.com>
          Tue, 13 Jan 2004 09:36:22 +0000 (09:36 +0000)
        * sysdeps/alpha/tcb-offsets.sym: New file.
        * sysdeps/unix/sysv/linux/alpha/sysdep-cancel.h (SINGLE_THREAD_P):
        Use MULTIPLE_THREADS_OFFSET to implement !libpthread !libc version.
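
        As background on this entry (an illustrative aside, not part of the
        commit): gen-as-const evaluates each "NAME expression" line of
        tcb-offsets.sym at build time and emits an assembler-includable header
        defining NAME to the computed constant, here the byte offset of
        header.multiple_threads inside struct pthread.  The assembly flavour of
        SINGLE_THREAD_P can then load that flag straight off the thread pointer
        (read with PAL_rduniq on Alpha, as the sysdep-cancel.h hunk below shows).
        A rough C equivalent of the test, with a deliberately cut-down,
        hypothetical struct layout (the real struct pthread lives in nptl/descr.h):

#include <stddef.h>

/* Hypothetical, heavily simplified layout -- only the one field the
   SINGLE_THREAD_P test cares about.  */
struct pthread
{
  struct
  {
    int multiple_threads;	/* Set once a second thread is created.  */
  } header;
};

/* The constant gen-as-const effectively computes for MULTIPLE_THREADS_OFFSET.  */
enum { multiple_threads_offset
	 = offsetof (struct pthread, header.multiple_threads) };

/* C-level equivalent of the assembler SINGLE_THREAD_P check.  */
static inline int
single_thread_p (const struct pthread *self)
{
  return self->header.multiple_threads == 0;
}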

        * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Rewrite based
        on powerpc version.
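
        As background on the lowlevellock.h rewrite (again illustrative, not
        from the commit): the powerpc-style fast paths rely on the usual
        0 (unlocked) / 1 (locked, no waiters) / 2 (locked, waiters possible)
        futex protocol, which is why __lll_mutex_unlock only issues a wake-up
        when the old value was greater than 1 and why __lll_mutex_cond_lock
        acquires with 2.  Below is a standalone sketch of that protocol using
        GCC __atomic builtins and a raw futex syscall in place of glibc's
        internal atomic.h macros and lll_futex_* wrappers; sketch_lock,
        sketch_unlock and the futex helpers are made-up names, not code from
        this patch.

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin stand-ins for glibc's lll_futex_wait/lll_futex_wake wrappers.  */
static void
futex_wait (int *addr, int val)
{
  syscall (SYS_futex, addr, FUTEX_WAIT, val, NULL, NULL, 0);
}

static void
futex_wake (int *addr, int nr)
{
  syscall (SYS_futex, addr, FUTEX_WAKE, nr, NULL, NULL, 0);
}

/* Lock: the fast path is a 0 -> 1 compare-and-exchange with acquire
   semantics; on contention, mark the futex 2 and sleep until woken,
   in the same spirit as glibc's __lll_lock_wait slow path.  */
static void
sketch_lock (int *futex)
{
  int expected = 0;
  if (__atomic_compare_exchange_n (futex, &expected, 1, 0,
				   __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    return;
  while (__atomic_exchange_n (futex, 2, __ATOMIC_ACQUIRE) != 0)
    futex_wait (futex, 2);
}

/* Unlock: swap 0 in with release semantics; only pay for the wake-up
   syscall when the old value says a waiter may be queued.  */
static void
sketch_unlock (int *futex)
{
  if (__atomic_exchange_n (futex, 0, __ATOMIC_RELEASE) > 1)
    futex_wake (futex, 1);
}

        The uncontended unlock is what motivates the three-state protocol: it
        is a single atomic exchange with no syscall, matching the new
        __lll_mutex_unlock in the diff below.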

sysdeps/alpha/nptl/Makefile [new file with mode: 0644]
sysdeps/alpha/nptl/tcb-offsets.sym [new file with mode: 0644]
sysdeps/unix/sysv/linux/alpha/nptl/lowlevellock.h
sysdeps/unix/sysv/linux/alpha/nptl/sysdep-cancel.h

diff --git a/sysdeps/alpha/nptl/Makefile b/sysdeps/alpha/nptl/Makefile
new file mode 100644
index 0000000..88c106b
--- /dev/null
+++ b/sysdeps/alpha/nptl/Makefile
@@ -0,0 +1,21 @@
+# Copyright (C) 2003 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, write to the Free
+# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+# 02111-1307 USA.
+
+ifeq ($(subdir),csu)
+gen-as-const-headers += tcb-offsets.sym
+endif
diff --git a/sysdeps/alpha/nptl/tcb-offsets.sym b/sysdeps/alpha/nptl/tcb-offsets.sym
new file mode 100644
index 0000000..3f6433d
--- /dev/null
+++ b/sysdeps/alpha/nptl/tcb-offsets.sym
@@ -0,0 +1,12 @@
+#include <sysdep.h>
+#include <tls.h>
+
+--
+
+-- Abuse tls.h macros to derive offsets relative to the thread register.
+# define __builtin_thread_pointer()  ((void *) 0)
+# define thread_offsetof(mem)       ((void *) &THREAD_SELF->mem - (void *) 0)
+
+#if TLS_MULTIPLE_THREADS_IN_TCB
+MULTIPLE_THREADS_OFFSET                thread_offsetof (header.multiple_threads)
+#endif
diff --git a/sysdeps/unix/sysv/linux/alpha/nptl/lowlevellock.h b/sysdeps/unix/sysv/linux/alpha/nptl/lowlevellock.h
index cc054f9..9d125e0 100644
--- a/sysdeps/unix/sysv/linux/alpha/nptl/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/alpha/nptl/lowlevellock.h
@@ -23,6 +23,7 @@
 #include <sys/param.h>
 #include <bits/pthreadtypes.h>
 #include <atomic.h>
+#include <sysdep.h>
 
 
 #define __NR_futex             394
     INTERNAL_SYSCALL_ERROR_P (__ret, __err)? -__ret : __ret;                 \
   })
 
-/* Set *futex to 1 if it is 0, atomically.  Returns the old value */
-#define __lll_trylock(futex) \
-  ({ int __oldval, __temp;                                             \
-     __asm __volatile (                                                        \
-       "1:     ldl_l   %[__oldval], %[__mem]\n"                        \
-       "       lda     %[__temp], 1\n"                                 \
-       "       bne     %[__oldval], 2f\n"                              \
-       "       stl_c   %[__temp], %[__mem]\n"                          \
-       "       beq     %[__temp], 1b\n"                                \
-               __MB                                                    \
-       "2:"                                                            \
-       : [__oldval] "=&r" (__oldval),                                  \
-         [__temp] "=&r" (__temp)                                       \
-       : [__mem] "m" (*(futex))                                        \
-       : "memory");                                                    \
-     __oldval;                                                         \
-  })
 
-#define lll_mutex_trylock(lock)        __lll_trylock (&(lock))
+static inline int __attribute__((always_inline))
+__lll_mutex_trylock(int *futex)
+{
+  return atomic_compare_and_exchange_val_acq (futex, 1, 0) != 0;
+}
+#define lll_mutex_trylock(lock)        __lll_mutex_trylock (&(lock))
 
 
-extern void __lll_lock_wait (int *futex, int val) attribute_hidden;
+extern void __lll_lock_wait (int *futex) attribute_hidden;
 
-#define lll_mutex_lock(lock) \
-  (void) ({                                                            \
-    int *__futex = &(lock);                                            \
-    int __val = atomic_exchange_and_add (__futex, 1);                  \
-    atomic_full_barrier();                                             \
-    if (__builtin_expect (__val != 0, 0))                              \
-      __lll_lock_wait (__futex, __val);                                        \
-  })
+static inline void __attribute__((always_inline))
+__lll_mutex_lock(int *futex)
+{
+  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
+    __lll_lock_wait (futex);
+}
+#define lll_mutex_lock(futex) __lll_mutex_lock (&(futex))
 
-#define lll_mutex_cond_lock(lock) \
-  (void) ({                                                            \
-    int *__futex = &(lock);                                            \
-    int __val = atomic_exchange_and_add (__futex, 2);                  \
-    atomic_full_barrier();                                             \
-    if (__builtin_expect (__val != 0, 0))                              \
-      /* Note, the val + 1 is kind of ugly here.  __lll_lock_wait will \
-        add 1 again.  But we added 2 to the futex value so this is the \
-        right value which will be passed to the kernel.  */            \
-      __lll_lock_wait (__futex, __val + 1);                            \
-  })
 
-extern int __lll_timedlock_wait
-       (int *futex, int val, const struct timespec *) attribute_hidden;
+static inline void __attribute__ ((always_inline))
+__lll_mutex_cond_lock (int *futex)
+{
+  if (atomic_compare_and_exchange_bool_acq (futex, 2, 0) != 0)
+    __lll_lock_wait (futex);
+}
+#define lll_mutex_cond_lock(futex) __lll_mutex_cond_lock (&(futex))
 
-#define lll_mutex_timedlock(lock, abstime) \
-  ({ int *__futex = &(lock);                                           \
-     int __val = atomic_exchange_and_add (__futex, 1);                 \
-     atomic_full_barrier();                                            \
-     if (__builtin_expect (__val != 0, 0))                             \
-       __val = __lll_timedlock_wait (__futex, __val, (abstime));       \
-     __val;                                                            \
-  })
 
-#define lll_mutex_unlock(lock) \
-  ((void) ({                                                           \
-    int *__futex = &(lock), __val;                                     \
-    atomic_write_barrier();                                            \
-    __val = atomic_exchange_rel (__futex, 0);                          \
-    if (__builtin_expect (__val > 1, 0))                               \
-      lll_futex_wake (__futex, 1);                                     \
-  }))
-
-#define lll_mutex_unlock_force(lock) \
-  ((void) ({                                                           \
-    int *__futex = &(lock);                                            \
-    atomic_write_barrier();                                            \
-    *__futex = 0;                                                      \
-    atomic_full_barrier();                                             \
-    lll_futex_wake (__futex, 1);                                       \
-  }))
+extern int __lll_timedlock_wait (int *futex, const struct timespec *)
+       attribute_hidden;
+
+static inline int __attribute__ ((always_inline))
+__lll_mutex_timedlock (int *futex, const struct timespec *abstime)
+{
+  int result = 0;
+  if (atomic_compare_and_exchange_bool_acq (futex, 1, 0) != 0)
+    result = __lll_timedlock_wait (futex, abstime);
+  return result;
+}
+#define lll_mutex_timedlock(futex, abstime) \
+  __lll_mutex_timedlock (&(futex), abstime)
+
+
+static inline void __attribute__ ((always_inline))
+__lll_mutex_unlock (int *futex)
+{
+  int val = atomic_exchange_rel (futex, 0);
+  if (__builtin_expect (val > 1, 0))
+    lll_futex_wake (futex, 1);
+}
+#define lll_mutex_unlock(futex) __lll_mutex_unlock(&(futex))
+
+
+static inline void __attribute__ ((always_inline))
+__lll_mutex_unlock_force (int *futex)
+{
+  (void) atomic_exchange_rel (futex, 0);
+  lll_futex_wake (futex, 1);
+}
+#define lll_mutex_unlock_force(futex) __lll_mutex_unlock_force(&(futex))
+
 
 #define lll_mutex_islocked(futex) \
   (futex != 0)
@@ -175,21 +165,21 @@ extern int lll_unlock_wake_cb (int *__futex) attribute_hidden;
    thread ID while the clone is running and is reset to zero
    afterwards. */
 #define lll_wait_tid(tid) \
-  do {                                                                       \
-    __typeof (tid) __tid;                                                    \
-    while ((__tid = (tid)) != 0)                                             \
-      lll_futex_wait (&(tid), __tid);                                        \
+  do {                                 \
+    __typeof (tid) __tid;              \
+    while ((__tid = (tid)) != 0)       \
+      lll_futex_wait (&(tid), __tid);  \
   } while (0)
 
 extern int __lll_timedwait_tid (int *, const struct timespec *)
      attribute_hidden;
 
 #define lll_timedwait_tid(tid, abstime) \
-  ({                                                                         \
-    int __res = 0;                                                           \
-    if ((tid) != 0)                                                          \
-      __res = __lll_timedwait_tid (&(tid), (abstime));                       \
-    __res;                                                                   \
+  ({                                                   \
+    int __res = 0;                                     \
+    if ((tid) != 0)                                    \
+      __res = __lll_timedwait_tid (&(tid), (abstime)); \
+    __res;                                             \
   })
 
 
diff --git a/sysdeps/unix/sysv/linux/alpha/nptl/sysdep-cancel.h b/sysdeps/unix/sysv/linux/alpha/nptl/sysdep-cancel.h
index 1b27e27..3b08b22 100644
--- a/sysdeps/unix/sysv/linux/alpha/nptl/sysdep-cancel.h
+++ b/sysdeps/unix/sysv/linux/alpha/nptl/sysdep-cancel.h
@@ -132,26 +132,28 @@ __LABEL($syscall_error)                                           \
 #  define CDISABLE     jsr ra, __local_disable_asynccancel; ldgp ra, 0(gp)
 # endif
 
-# if defined IS_IN_libpthread || !defined NOT_IN_libc
-#  ifndef __ASSEMBLER__
+#endif
+
+#if defined IS_IN_libpthread || !defined NOT_IN_libc
+# ifndef __ASSEMBLER__
 extern int __local_multiple_threads attribute_hidden;
-#   define SINGLE_THREAD_P \
+#  define SINGLE_THREAD_P \
        __builtin_expect (__local_multiple_threads == 0, 1)
-#  elif defined(PIC)
-#   define SINGLE_THREAD_P(reg)  ldl reg, __local_multiple_threads(gp) !gprel
-#  else
-#   define SINGLE_THREAD_P(reg)                                        \
+# elif defined(PIC)
+#  define SINGLE_THREAD_P(reg)  ldl reg, __local_multiple_threads(gp) !gprel
+# else
+#  define SINGLE_THREAD_P(reg)                                 \
        ldah    reg, __local_multiple_threads(gp) !gprelhigh;   \
        ldl     reg, __local_multiple_threads(reg) !gprellow
-#  endif
-# else
-#  ifndef __ASSEMBLER__
-#   define SINGLE_THREAD_P \
+# endif
+#else
+# ifndef __ASSEMBLER__
+#  define SINGLE_THREAD_P \
        __builtin_expect (THREAD_GETMEM (THREAD_SELF, \
                                   header.multiple_threads) == 0, 1)
-#  else
-#   error Not done
-#  endif
+# else
+#  define SINGLE_THREAD_P(reg)                                 \
+       call_pal PAL_rduniq;                                    \
+       ldl reg, MULTIPLE_THREADS_OFFSET($0)
 # endif
-
 #endif