author    Ulrich Drepper <drepper@redhat.com>
          Wed, 15 Feb 2006 17:20:33 +0000 (17:20 +0000)
committer Ulrich Drepper <drepper@redhat.com>
          Wed, 15 Feb 2006 17:20:33 +0000 (17:20 +0000)

* sysdeps/unix/sysv/linux/not-cancel.h (__openat_not_cancel,
__openat64_not_cancel): Remove prototypes.
(__openat_nocancel, __openat64_nocancel): New prototypes or defines.
(openat_not_cancel, openat_not_cancel_3, openat64_not_cancel,
openat64_not_cancel_3): Use them.

28 files changed:
ChangeLog
nptl/ChangeLog
nptl/Makefile
nptl/descr.h
nptl/pthreadP.h
nptl/pthread_create.c
nptl/pthread_mutex_consistent.c
nptl/pthread_mutex_destroy.c
nptl/pthread_mutex_lock.c
nptl/pthread_mutex_timedlock.c
nptl/pthread_mutex_trylock.c
nptl/pthread_mutex_unlock.c
nptl/sysdeps/unix/sysv/linux/Makefile
nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S [new file with mode: 0644]
nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S [new file with mode: 0644]
nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S [new file with mode: 0644]
nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
nptl/sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h
nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym [new file with mode: 0644]
nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
nptl/sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h
nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S [new file with mode: 0644]
sysdeps/unix/sysv/linux/not-cancel.h

index a0c4c17..ba21c34 100644 (file)
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2006-02-13  Jakub Jelinek  <jakub@redhat.com>
+
+       * sysdeps/unix/sysv/linux/not-cancel.h (__openat_not_cancel,
+       __openat64_not_cancel): Remove prototypes.
+       (__openat_nocancel, __openat64_nocancel): New prototypes or defines.
+       (openat_not_cancel, openat_not_cancel_3, openat64_not_cancel,
+       openat64_not_cancel_3): Use them.
+
 2006-02-12  Ulrich Drepper  <drepper@redhat.com>
 
        * io/ftw.c: Start using *at functions.
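
The not-cancel.h change described above renames the internal non-cancellable openat helpers and routes the openat*_not_cancel macros through them.  A minimal sketch of such a header fragment, assuming only the names from the ChangeLog entry (the real glibc header additionally selects between a direct syscall and the function depending on configuration):

    /* Sketch only -- not the verbatim glibc header fragment.  */
    extern int __openat_nocancel (int fd, const char *fname, int oflag,
                                  mode_t mode) attribute_hidden;
    extern int __openat64_nocancel (int fd, const char *fname, int oflag,
                                    mode_t mode) attribute_hidden;

    #define openat_not_cancel(fd, fname, oflag, mode) \
      __openat_nocancel (fd, fname, oflag, mode)
    #define openat_not_cancel_3(fd, fname, oflag) \
      __openat_nocancel (fd, fname, oflag, 0)
    #define openat64_not_cancel(fd, fname, oflag, mode) \
      __openat64_nocancel (fd, fname, oflag, mode)
    #define openat64_not_cancel_3(fd, fname, oflag) \
      __openat64_nocancel (fd, fname, oflag, 0)
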
index 073b364..c280d8d 100644 (file)
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,30 @@
+2006-02-15  Ulrich Drepper  <drepper@redhat.com>
+
+       * pthreadP.h: Define PTHREAD_MUTEX_INCONSISTENT instead of
+       PTHREAD_MUTEX_OWNERDEAD.
+       (PTHREAD_MUTEX_ROBUST_PRIVATE_NP): Define as 16, not 256.
+       Define FUTEX_WAITERS, FUTEX_OWNER_DIED, FUTEX_TID_MASK.
+       * Makefile (libpthread-routines): Add lowlevelrobustlock.
+       * pthread_create.c (start_thread): Very much simplify robust_list loop.
+       * pthread_mutex_consistent.c: Inconsistent mutexes now have __owner set
+       to PTHREAD_MUTEX_INCONSISTENT.
+       * pthread_mutex_destroy.c: Allow destroying of inconsistent mutexes.
+       * pthread_mutex_lock.c: Reimplement robust mutex handling.
+       * pthread_mutex_trylock.c: Likewise.
+       * pthread_mutex_timedlock.c: Likewise.
+       * pthread_mutex_unlock.c: Likewise.
+       * sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c: Likewise.
+       * sysdeps/unix/sysv/linux/Makefile (gen-as-const-headers): Add
+       lowlevelrobustlock.sym.
+       * sysdeps/unix/sysv/linux/lowlevelrobustlock.sym: New file.
+       * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Add lll_robust_mutex_*
+       definitions.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
+       * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: New file.
+       * sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S: New file.
+       * sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S: New file.
+       * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: New file.
+
 2006-02-12  Ulrich Drepper  <drepper@redhat.com>
 
        * allocatestack.c (allocate_stack): Initialize robust_list.
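
The nptl/ChangeLog entry above is the heart of the change: robust mutexes now keep the owner's TID plus status bits directly in the futex word, so a dying thread (see the start_thread change) and the new lll_robust_mutex_* primitives can hand a dead owner's lock over to the next locker.  From an application's point of view the visible behaviour is the EOWNERDEAD/ENOTRECOVERABLE protocol; a hedged usage sketch, where repair_shared_state is a hypothetical application hook and not part of the patch:

    #include <errno.h>
    #include <pthread.h>

    static int repair_shared_state (void);   /* hypothetical application hook */

    static int
    lock_robust (pthread_mutex_t *m)
    {
      int err = pthread_mutex_lock (m);
      if (err == EOWNERDEAD)
        {
          /* The previous owner died while holding the lock.  If the
             protected data can be repaired, mark the mutex consistent.  */
          if (repair_shared_state () == 0)
            return pthread_mutex_consistent_np (m);

          /* Otherwise unlocking an inconsistent mutex marks it
             unrecoverable; later lockers get ENOTRECOVERABLE.  */
          pthread_mutex_unlock (m);
          return ENOTRECOVERABLE;
        }
      return err;
    }
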
index d7b3aaf..e484c6a 100644 (file)
--- a/nptl/Makefile
+++ b/nptl/Makefile
@@ -100,7 +100,7 @@ libpthread-routines = init vars events version \
                      cleanup_defer_compat unwind \
                      pt-longjmp pt-cleanup\
                      cancellation \
-                     lowlevellock \
+                     lowlevellock lowlevelrobustlock \
                      pt-vfork \
                      ptw-write ptw-read ptw-close ptw-fcntl ptw-accept \
                      ptw-connect ptw-recv ptw-recvfrom ptw-recvmsg ptw-send \
index 6984138..d5491c1 100644 (file)
--- a/nptl/descr.h
+++ b/nptl/descr.h
@@ -166,7 +166,7 @@ struct pthread
   do {                                                                       \
     __pthread_slist_t *runp = THREAD_GETMEM (THREAD_SELF, robust_list.__next);\
     if (runp == &mutex->__data.__list)                                       \
-      THREAD_SETMEM (THREAD_SELF, robust_list, runp->__next);                \
+      THREAD_SETMEM (THREAD_SELF, robust_list.__next, runp->__next);         \
     else                                                                     \
       {                                                                              \
        while (runp->__next != &mutex->__data.__list)                         \
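
The descr.h hunk above fixes DEQUEUE_MUTEX for the singly-linked (no __prev) robust list: when the element being removed is the first one, the thread descriptor's robust_list.__next pointer must be relinked, not the whole robust_list member overwritten.  The same removal logic on a plain list, as a stand-alone sketch with simplified types (the real macro goes through THREAD_GETMEM/THREAD_SETMEM):

    struct slist { struct slist *__next; };

    static void
    slist_dequeue (struct slist *head, struct slist *elem)
    {
      struct slist *runp = head->__next;
      if (runp == elem)
        head->__next = elem->__next;      /* head case: fix head->__next only */
      else
        {
          while (runp->__next != elem)    /* walk to the predecessor */
            runp = runp->__next;
          runp->__next = elem->__next;    /* unlink elem from the list */
        }
    }
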
index 61b7176..77d8f5a 100644 (file)
--- a/nptl/pthreadP.h
+++ b/nptl/pthreadP.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -52,7 +52,7 @@
 
 
 /* Magic cookie representing robust mutex with dead owner.  */
-#define PTHREAD_MUTEX_OWNERDEAD                INT_MAX
+#define PTHREAD_MUTEX_INCONSISTENT     INT_MAX
 /* Magic cookie representing not recoverable robust mutex.  */
 #define PTHREAD_MUTEX_NOTRECOVERABLE   (INT_MAX - 1)
 
@@ -60,7 +60,7 @@
 /* Internal mutex type value.  */
 enum
 {
-  PTHREAD_MUTEX_ROBUST_PRIVATE_NP = 256,
+  PTHREAD_MUTEX_ROBUST_PRIVATE_NP = 16,
   PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP
   = PTHREAD_MUTEX_ROBUST_PRIVATE_NP | PTHREAD_MUTEX_RECURSIVE_NP,
   PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP
@@ -77,6 +77,12 @@ enum
   (PTHREAD_MUTEXATTR_FLAG_ROBUST | PTHREAD_MUTEXATTR_FLAG_PSHARED)
 
 
+/* Bits used in robust mutex implementation.  */
+#define FUTEX_WAITERS          0x80000000
+#define FUTEX_OWNER_DIED       0x40000000
+#define FUTEX_TID_MASK         0x1fffffff
+
+
 /* Internal variables.  */
 
 
index b1253b2..f3d90ec 100644 (file)
--- a/nptl/pthread_create.c
+++ b/nptl/pthread_create.c
@@ -324,17 +324,12 @@ start_thread (void *arg)
            ((char *) robust - offsetof (struct __pthread_mutex_s, __list));
          robust = robust->__next;
 
-         assert (lll_mutex_islocked (this->__lock));
-         this->__count = 0;
-         --this->__nusers;
-         assert (this->__owner != PTHREAD_MUTEX_NOTRECOVERABLE);
-         this->__owner = PTHREAD_MUTEX_OWNERDEAD;
          this->__list.__next = NULL;
 #ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
 #endif
 
-         lll_mutex_unlock (this->__lock);
+         lll_robust_mutex_dead (this->__lock);
        }
       while (robust != &pd->robust_list);
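
With this change an exiting thread no longer pretends to unlock each still-held robust mutex; it only clears the list pointers and calls lll_robust_mutex_dead, which (per the i386/x86_64 definitions later in this patch) atomically sets FUTEX_OWNER_DIED in the lock word and wakes one waiter.  A sketch of that primitive using generic GCC/Linux building blocks instead of the inline assembly:

    #include <sys/syscall.h>
    #include <unistd.h>

    #define FUTEX_WAKE       1            /* as in the new .S files below */
    #define FUTEX_OWNER_DIED 0x40000000

    static void
    robust_mutex_dead (int *futexword)
    {
      /* Flag the lock as held by a dead owner ...  */
      __sync_fetch_and_or (futexword, FUTEX_OWNER_DIED);
      /* ... and wake one waiter so it can observe the flag and recover.  */
      syscall (SYS_futex, futexword, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
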
 
index 2edfe8a..0cfe972 100644 (file)
--- a/nptl/pthread_mutex_consistent.c
+++ b/nptl/pthread_mutex_consistent.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2005.
 
@@ -27,10 +27,10 @@ pthread_mutex_consistent_np (mutex)
 {
   /* Test whether this is a robust mutex with a dead owner.  */
   if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) == 0
-      || mutex->__data.__owner != -THREAD_GETMEM (THREAD_SELF, tid))
+      || mutex->__data.__owner != PTHREAD_MUTEX_INCONSISTENT)
     return EINVAL;
 
-  mutex->__data.__owner = -mutex->__data.__owner;
+  mutex->__data.__owner = THREAD_GETMEM (THREAD_SELF, tid);
 
   return 0;
 }
index 2bf76a9..19a647a 100644 (file)
--- a/nptl/pthread_mutex_destroy.c
+++ b/nptl/pthread_mutex_destroy.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -26,9 +26,17 @@ __pthread_mutex_destroy (mutex)
      pthread_mutex_t *mutex;
 {
   if (mutex->__data.__nusers != 0)
-    return EBUSY;
+    {
+      if ((mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_PRIVATE_NP) != 0
+         && (mutex->__data.__lock & FUTEX_OWNER_DIED) != 0
+         && mutex->__data.__nusers == 1)
+       goto dead_robust_mutex;
+
+      return EBUSY;
+    }
 
   /* Set to an invalid value.  */
+ dead_robust_mutex:
   mutex->__data.__kind = -1;
 
   return 0;
index 420711a..dd22567 100644 (file)
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -27,6 +27,7 @@
 #ifndef LLL_MUTEX_LOCK
 # define LLL_MUTEX_LOCK(mutex) lll_mutex_lock (mutex)
 # define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_trylock (mutex)
+# define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_lock (mutex, id)
 #endif
 
 
@@ -36,6 +37,7 @@ __pthread_mutex_lock (mutex)
 {
   assert (sizeof (mutex->__size) >= sizeof (mutex->__data));
 
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
   int retval = 0;
@@ -107,60 +109,83 @@ __pthread_mutex_lock (mutex)
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+      oldval = mutex->__data.__lock;
+      do
        {
-         /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-           /* Overflow of the counter.  */
-           return EAGAIN;
-
-         ++mutex->__data.__count;
-
-         return 0;
-       }
-
-      /* We have to get the mutex.  */
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval;
+             while ((newval
+                     = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                            id, oldval))
+                    != oldval)
+               {
+                 if ((newval & FUTEX_OWNER_DIED) == 0)
+                   goto normal;
+                 oldval = newval;
+               }
 
-      mutex->__data.__count = 1;
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  If we are not supposed
+                to increment __nusers we actually have to decrement
+                it here.  */
+#ifdef NO_INCR
+             --mutex->__data.__nusers;
+#endif
 
-      goto robust;
+             return EOWNERDEAD;
+           }
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-       return EDEADLK;
+       normal:
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+                               == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+               return EDEADLK;
 
-      /* FALLTHROUGH */
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+               {
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      LLL_MUTEX_LOCK (mutex->__data.__lock);
+                 ++mutex->__data.__count;
 
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-       {
-         /* This mutex is now not recoverable.  */
-         mutex->__data.__count = 0;
-         lll_mutex_unlock (mutex->__data.__lock);
-         return ENOTRECOVERABLE;
-       }
+                 return 0;
+               }
+           }
 
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-             || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
+         oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);
 
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_OWNERDEAD, 0))
-       {
-         retval = EOWNERDEAD;
-         /* We signal ownership of a not yet recovered robust mutex
-            by storing the negative thread ID.  */
-         id = -id;
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_mutex_unlock (mutex->__data.__lock);
+             return ENOTRECOVERABLE;
+           }
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      mutex->__data.__count = 1;
       ENQUEUE_MUTEX (mutex);
       break;
 
index bc4ead7..b69caed 100644 (file)
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -28,6 +28,7 @@ pthread_mutex_timedlock (mutex, abstime)
      pthread_mutex_t *mutex;
      const struct timespec *abstime;
 {
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
   int result = 0;
 
@@ -103,67 +104,83 @@ pthread_mutex_timedlock (mutex, abstime)
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
+    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
+      oldval = mutex->__data.__lock;
+      do
        {
-         /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-           /* Overflow of the counter.  */
-           return EAGAIN;
-
-         ++mutex->__data.__count;
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval;
+             while ((newval
+                     = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                            id, oldval))
+                    != oldval)
+               {
+                 if ((newval & FUTEX_OWNER_DIED) == 0)
+                   goto normal;
+                 oldval = newval;
+               }
 
-         goto out;
-       }
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
 
-      /* We have to get the mutex.  */
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+             ENQUEUE_MUTEX (mutex);
 
-      if (result != 0)
-       goto out;
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  */
+             return EOWNERDEAD;
+           }
 
-      /* Only locked once so far.  */
-      mutex->__data.__count = 1;
-      goto robust;
+       normal:
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+                               == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+               return EDEADLK;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-       return EDEADLK;
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+               {
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
 
-      /* FALLTHROUGH */
+                 ++mutex->__data.__count;
 
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
-    case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      result = lll_mutex_timedlock (mutex->__data.__lock, abstime);
+                 return 0;
+               }
+           }
 
-      if (result != 0)
-       goto out;
+         result = lll_robust_mutex_timedlock (mutex->__data.__lock, abstime,
+                                              id);
 
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-       {
-         /* This mutex is now not recoverable.  */
-         mutex->__data.__count = 0;
-         lll_mutex_unlock (mutex->__data.__lock);
-         return ENOTRECOVERABLE;
-       }
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             lll_mutex_unlock (mutex->__data.__lock);
+             return ENOTRECOVERABLE;
+           }
 
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-             || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
+         if (result == ETIMEDOUT || result == EINVAL)
+           goto out;
 
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_OWNERDEAD, 0))
-       {
-         result = EOWNERDEAD;
-         /* We signal ownership of a not yet recovered robust mutex
-            by storing the negative thread ID.  */
-         mutex->__data.__owner = -id;
-         ++mutex->__data.__nusers;
+         oldval = result;
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
+      mutex->__data.__count = 1;
       ENQUEUE_MUTEX (mutex);
       break;
 
index ae73ecc..5a13ea6 100644 (file)
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002, 2003, 2005 Free Software Foundation, Inc.
+/* Copyright (C) 2002, 2003, 2005, 2006 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -28,6 +28,7 @@ int
 __pthread_mutex_trylock (mutex)
      pthread_mutex_t *mutex;
 {
+  int oldval;
   pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
 
   switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
@@ -77,73 +78,88 @@ __pthread_mutex_trylock (mutex)
 
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
-      /* Check whether we already hold the mutex.  */
-      if (abs (mutex->__data.__owner) == id)
-       {
-         /* Just bump the counter.  */
-         if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
-           /* Overflow of the counter.  */
-           return EAGAIN;
-
-         ++mutex->__data.__count;
-
-         return 0;
-       }
-
-      /* We have to get the mutex.  */
-      if (lll_mutex_trylock (mutex->__data.__lock) == 0)
-       {
-         mutex->__data.__count = 1;
-
-         goto robust;
-       }
-
-      break;
-
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
-      /* Check whether we already hold the mutex.  */
-      if (__builtin_expect (abs (mutex->__data.__owner) == id, 0))
-       return EDEADLK;
-
-      /* FALLTHROUGH */
-
     case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      if (lll_mutex_trylock (mutex->__data.__lock) != 0)
-       break;
-
-    robust:
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
-       {
-         /* This mutex is now not recoverable.  */
-         mutex->__data.__count = 0;
-         lll_mutex_unlock (mutex->__data.__lock);
-         return ENOTRECOVERABLE;
-       }
-
-      /* This mutex is either healthy or we can try to recover it.  */
-      assert (mutex->__data.__owner == 0
-             || mutex->__data.__owner == PTHREAD_MUTEX_OWNERDEAD);
-
-      /* Record the ownership.  */
-      int retval = 0;
-      if (__builtin_expect (mutex->__data.__owner
-                           == PTHREAD_MUTEX_OWNERDEAD, 0))
+      oldval = mutex->__data.__lock;
+      do
        {
-         retval = EOWNERDEAD;
-         /* We signal ownership of a not yet recovered robust
-            mutex by storing the negative thread ID.  */
-         id = -id;
+         if ((oldval & FUTEX_OWNER_DIED) != 0)
+           {
+             /* The previous owner died.  Try locking the mutex.  */
+             int newval;
+             while ((newval
+                     = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
+                                                            id, oldval))
+                    != oldval)
+               {
+                 if ((newval & FUTEX_OWNER_DIED) == 0)
+                   goto normal;
+                 oldval = newval;
+               }
+
+             /* We got the mutex.  */
+             mutex->__data.__count = 1;
+             /* But it is inconsistent unless marked otherwise.  */
+             mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;
+
+             ENQUEUE_MUTEX (mutex);
+
+             /* Note that we deliberately exit here.  If we fall
+                through to the end of the function __nusers would be
+                incremented which is not correct because the old
+                owner has to be discounted.  */
+             return EOWNERDEAD;
+           }
+
+       normal:
+         /* Check whether we already hold the mutex.  */
+         if (__builtin_expect ((mutex->__data.__lock & FUTEX_TID_MASK)
+                               == id, 0))
+           {
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP)
+               return EDEADLK;
+
+             if (mutex->__data.__kind
+                 == PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP)
+               {
+                 /* Just bump the counter.  */
+                 if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
+                   /* Overflow of the counter.  */
+                   return EAGAIN;
+
+                 ++mutex->__data.__count;
+
+                 return 0;
+               }
+           }
+
+         oldval = lll_robust_mutex_trylock (mutex->__data.__lock, id);
+         if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
+           return EBUSY;
+
+       robust:
+         if (__builtin_expect (mutex->__data.__owner
+                               == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
+           {
+             /* This mutex is now not recoverable.  */
+             mutex->__data.__count = 0;
+             if (oldval == id)
+               lll_mutex_unlock (mutex->__data.__lock);
+             return ENOTRECOVERABLE;
+           }
        }
+      while ((oldval & FUTEX_OWNER_DIED) != 0);
 
       ENQUEUE_MUTEX (mutex);
 
       mutex->__data.__owner = id;
       ++mutex->__data.__nusers;
+      mutex->__data.__count = 1;
+
+      return 0;
 
-      return retval
-;
     default:
       /* Correct code cannot set any other type.  */
       return EINVAL;
index 4d87381..d41eefe 100644 (file)
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -41,23 +41,32 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
       if (--mutex->__data.__count != 0)
        /* We still hold the mutex.  */
        return 0;
-      break;
+      goto normal;
 
     case PTHREAD_MUTEX_ERRORCHECK_NP:
       /* Error checking mutex.  */
       if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_mutex_islocked (mutex->__data.__lock))
        return EPERM;
-      break;
+      /* FALLTHROUGH */
 
     case PTHREAD_MUTEX_TIMED_NP:
     case PTHREAD_MUTEX_ADAPTIVE_NP:
-      /* Normal mutex.  Nothing special to do.  */
+      /* Always reset the owner field.  */
+    normal:
+      mutex->__data.__owner = 0;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      lll_mutex_unlock (mutex->__data.__lock);
       break;
 
     case PTHREAD_MUTEX_ROBUST_PRIVATE_RECURSIVE_NP:
       /* Recursive mutex.  */
-      if (mutex->__data.__owner == -THREAD_GETMEM (THREAD_SELF, tid))
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         == THREAD_GETMEM (THREAD_SELF, tid))
        {
          if (--mutex->__data.__count != 0)
            /* We still hold the mutex.  */
@@ -78,7 +87,8 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ERRORCHECK_NP:
     case PTHREAD_MUTEX_ROBUST_PRIVATE_NP:
     case PTHREAD_MUTEX_ROBUST_PRIVATE_ADAPTIVE_NP:
-      if (abs (mutex->__data.__owner) != THREAD_GETMEM (THREAD_SELF, tid)
+      if ((mutex->__data.__lock & FUTEX_TID_MASK)
+         != THREAD_GETMEM (THREAD_SELF, tid)
          || ! lll_mutex_islocked (mutex->__data.__lock))
        return EPERM;
 
@@ -86,15 +96,21 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
         making the state consistent, mark the mutex as unrecoverable
         and wake all waiters.  */
       if (__builtin_expect (mutex->__data.__owner
-                           == -THREAD_GETMEM (THREAD_SELF, tid)
-                           || (mutex->__data.__owner
-                               == PTHREAD_MUTEX_NOTRECOVERABLE), 0))
+                           == PTHREAD_MUTEX_INCONSISTENT, 0))
       notrecoverable:
        newowner = PTHREAD_MUTEX_NOTRECOVERABLE;
 
     robust:
       /* Remove mutex from the list.  */
       DEQUEUE_MUTEX (mutex);
+
+      mutex->__data.__owner = newowner;
+      if (decr)
+       /* One less user.  */
+       --mutex->__data.__nusers;
+
+      /* Unlock.  */
+      lll_robust_mutex_unlock (mutex->__data.__lock);
       break;
 
     default:
@@ -102,15 +118,6 @@ __pthread_mutex_unlock_usercnt (mutex, decr)
       return EINVAL;
     }
 
-  /* Always reset the owner field.  */
-  mutex->__data.__owner = newowner;
-  if (decr)
-    /* One less user.  */
-    --mutex->__data.__nusers;
-
-  /* Unlock.  */
-  lll_mutex_unlock (mutex->__data.__lock);
-
   return 0;
 }
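
The unlock path now keeps the owner/__nusers bookkeeping inside each case and finishes the robust cases with lll_robust_mutex_unlock, which atomically strips the calling thread's ownership from the futex word and only enters the kernel when waiters have to be woken.  A simplified model of that intent (the inline assembly in this patch differs in detail):

    #include <sys/syscall.h>
    #include <unistd.h>

    #define FUTEX_WAKE    1
    #define FUTEX_WAITERS 0x80000000u

    static void
    robust_unlock (unsigned int *lockword)
    {
      /* Drop our TID (and a possible OWNER_DIED bit), keeping only the
         waiters flag; enter the kernel only if someone is blocked.  */
      unsigned int oldval = __sync_fetch_and_and (lockword, FUTEX_WAITERS);
      if ((oldval & FUTEX_WAITERS) != 0)
        syscall (SYS_futex, lockword, FUTEX_WAKE, 1, NULL, NULL, 0);
    }
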
 
index 14f513c..88dce1a 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/Makefile
+++ b/nptl/sysdeps/unix/sysv/linux/Makefile
@@ -1,4 +1,4 @@
-# Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+# Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
 # This file is part of the GNU C Library.
 # Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
 
@@ -24,7 +24,8 @@ sysdep_routines += register-atfork unregister-atfork libc_pthread_init \
 libpthread-sysdep_routines += pt-fork pthread_mutex_cond_lock
 
 gen-as-const-headers += lowlevelcond.sym lowlevelrwlock.sym \
-                       lowlevelbarrier.sym unwindbuf.sym
+                       lowlevelbarrier.sym unwindbuf.sym \
+                       lowlevelrobustlock.sym
 endif
 
 ifeq ($(subdir),posix)
index cb91691..41a54d4 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/alpha/bits/pthreadtypes.h
@@ -43,6 +43,13 @@ typedef union
 } pthread_attr_t;
 
 
+typedef struct __pthread_internal_list
+{
+  struct __pthread_internal_list *__prev;
+  struct __pthread_internal_list *__next;
+} __pthread_list_t;
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is deliberately not exposed.  */
 typedef union
@@ -57,8 +64,7 @@ typedef union
        binary compatibility.  */
     int __kind;
     int __spins;
-    struct __pthread_mutex_s *__next;
-    struct __pthread_mutex_s *__prev;
+    __pthread_list_t __list;
 #define __PTHREAD_MUTEX_HAVE_PREV      1
   } __data;
   char __size[__SIZEOF_PTHREAD_MUTEX_T];
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..1c516c7
--- /dev/null
@@ -0,0 +1,186 @@
+/* Copyright (C) 2002, 2003, 2004, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevelrobustlock.h>
+
+       .text
+
+#ifndef LOCK
+# ifdef UP
+#  define LOCK
+# else
+#  define LOCK lock
+# endif
+#endif
+
+#define SYS_gettimeofday       __NR_gettimeofday
+#define SYS_futex              240
+#define FUTEX_WAIT             0
+#define FUTEX_WAKE             1
+#define FUTEX_WAITERS          0x80000000
+#define FUTEX_OWNER_DIED       0x40000000
+
+
+       .globl  __lll_robust_mutex_lock_wait
+       .type   __lll_robust_mutex_lock_wait,@function
+       .hidden __lll_robust_mutex_lock_wait
+       .align  16
+__lll_robust_mutex_lock_wait:
+       pushl   %edx
+       pushl   %ebx
+       pushl   %esi
+
+       movl    %ecx, %ebx
+       xorl    %esi, %esi      /* No timeout.  */
+       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
+
+4:     movl    %eax, %edx
+       orl     $FUTEX_WAITERS, %edx
+
+       testl   $FUTEX_OWNER_DIED, %eax
+       jnz     3f
+
+       cmpl    %edx, %eax      /* NB:   %edx == 2 */
+       je      1f
+
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jnz     2f
+
+1:     movl    $SYS_futex, %eax
+       ENTER_KERNEL
+
+       movl    (%ebx), %eax
+
+2:     test    %eax, %eax
+       jne     4b
+
+       movl    %gs:TID, %edx
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jnz     4b
+       /* NB:   %eax == 0 */
+
+3:     popl    %esi
+       popl    %ebx
+       popl    %edx
+       ret
+       .size   __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+
+
+       .globl  __lll_robust_mutex_timedlock_wait
+       .type   __lll_robust_mutex_timedlock_wait,@function
+       .hidden __lll_robust_mutex_timedlock_wait
+       .align  16
+__lll_robust_mutex_timedlock_wait:
+       /* Check for a valid timeout value.  */
+       cmpl    $1000000000, 4(%edx)
+       jae     3f
+
+       pushl   %edi
+       pushl   %esi
+       pushl   %ebx
+       pushl   %ebp
+
+       /* Stack frame for the timespec and timeval structs.  */
+       subl    $12, %esp
+
+       movl    %ecx, %ebp
+       movl    %edx, %edi
+
+1:     movl    %eax, 8(%esp)
+
+       /* Get current time.  */
+       movl    %esp, %ebx
+       xorl    %ecx, %ecx
+       movl    $SYS_gettimeofday, %eax
+       ENTER_KERNEL
+
+       /* Compute relative timeout.  */
+       movl    4(%esp), %eax
+       movl    $1000, %edx
+       mul     %edx            /* Microseconds to nanoseconds.  */
+       movl    (%edi), %ecx
+       movl    4(%edi), %edx
+       subl    (%esp), %ecx
+       subl    %eax, %edx
+       jns     4f
+       addl    $1000000000, %edx
+       subl    $1, %ecx
+4:     testl   %ecx, %ecx
+       js      8f              /* Time is already up.  */
+
+       /* Store relative timeout.  */
+       movl    %ecx, (%esp)
+       movl    %edx, 4(%esp)
+
+       movl    %ebp, %ebx
+
+       movl    8(%esp), %edx
+       movl    %edx, %eax
+       orl     $FUTEX_WAITERS, %edx
+
+       testl   $FUTEX_OWNER_DIED, %eax
+       jnz     6f
+
+       cmpl    %eax, %edx
+       je      2f
+
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       movl    $0, %ecx        /* Must use mov to avoid changing cc.  */
+       jnz     5f
+
+2:
+       /* Futex call.  */
+       movl    %esp, %esi
+       xorl    %ecx, %ecx      /* movl $FUTEX_WAIT, %ecx */
+       movl    $SYS_futex, %eax
+       ENTER_KERNEL
+       movl    %eax, %ecx
+
+       movl    (%ebx), %eax
+
+5:     testl   %eax, %eax
+       jne     7f
+
+       movl    %gs:TID, %edx
+       LOCK
+       cmpxchgl %edx, (%ebx)
+       jnz     7f
+
+6:     addl    $12, %esp
+       popl    %ebp
+       popl    %ebx
+       popl    %esi
+       popl    %edi
+       ret
+
+       /* Check whether the time expired.  */
+7:     cmpl    $-ETIMEDOUT, %ecx
+       jne     1b
+
+8:     movl    $ETIMEDOUT, %eax
+       jmp     6b
+
+3:     movl    $EINVAL, %eax
+       ret
+       .size   __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
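
The assembly above is the contended-path helper called from lll_robust_mutex_lock when the initial cmpxchg fails.  Restated in C with generic GCC builtins, as a sketch under the assumption that tid is the caller's kernel TID and oldval the value that made the fast path fail (the real code reads the TID from %gs:TID):

    #include <sys/syscall.h>
    #include <unistd.h>

    #define FUTEX_WAIT       0
    #define FUTEX_WAITERS    0x80000000u
    #define FUTEX_OWNER_DIED 0x40000000u

    static unsigned int
    robust_lock_wait (unsigned int *futexword, unsigned int oldval, unsigned int tid)
    {
      while (1)
        {
          /* A dead owner is not waited for; return so the caller can run
             the take-over path and report EOWNERDEAD.  */
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            return oldval;

          /* Make sure the waiters bit is set before sleeping so the
             owner's unlock knows it must issue a FUTEX_WAKE.  */
          unsigned int withwaiters = oldval | FUTEX_WAITERS;
          if (oldval == withwaiters
              || __sync_bool_compare_and_swap (futexword, oldval, withwaiters))
            syscall (SYS_futex, futexword, FUTEX_WAIT, withwaiters, NULL, NULL, 0);

          oldval = *futexword;
          if (oldval == 0)
            {
              /* Unlocked: claim it by storing our TID.  */
              if (__sync_bool_compare_and_swap (futexword, 0, tid))
                return 0;
              oldval = *futexword;    /* lost the race; loop again */
            }
        }
    }
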
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..f768e16
--- /dev/null
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "../i486/lowlevelrobustlock.S"
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S b/nptl/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..f768e16
--- /dev/null
@@ -0,0 +1,20 @@
+/* Copyright (C) 2002, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include "../i486/lowlevelrobustlock.S"
index b233d97..f3b9714 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -139,6 +139,16 @@ extern int __lll_mutex_unlock_wake (int *__futex)
      ret; })
 
 
+#define lll_robust_mutex_trylock(futex, id) \
+  ({ int ret;                                                                \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"                          \
+                      : "=a" (ret), "=m" (futex)                             \
+                      : "r" (id), "m" (futex),                               \
+                        "0" (LLL_MUTEX_LOCK_INITIALIZER)                     \
+                      : "memory");                                           \
+     ret; })
+
+
 #define lll_mutex_cond_trylock(futex) \
   ({ int ret;                                                                \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"                          \
@@ -167,6 +177,25 @@ extern int __lll_mutex_unlock_wake (int *__futex)
                              : "memory"); })
 
 
+#define lll_robust_mutex_lock(futex, id) \
+  ({ int result, ignore;                                                     \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"                              \
+                      "jnz _L_mutex_lock_%=\n\t"                             \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_mutex_lock_%=,@function\n"                   \
+                      "_L_mutex_lock_%=:\n\t"                                \
+                      "leal %2, %%ecx\n\t"                                   \
+                      "call __lll_robust_mutex_lock_wait\n\t"                \
+                      "jmp 1f\n\t"                                           \
+                      ".size _L_mutex_lock_%=,.-_L_mutex_lock_%=\n"          \
+                      ".previous\n"                                          \
+                      "1:"                                                   \
+                      : "=a" (result), "=c" (ignore), "=m" (futex)           \
+                      : "0" (0), "1" (id), "m" (futex)                       \
+                      : "memory");                                           \
+     result; })
+
+
 /* Special version of lll_mutex_lock which causes the unlock function to
    always wakeup waiters.  */
 #define lll_mutex_cond_lock(futex) \
@@ -187,6 +216,25 @@ extern int __lll_mutex_unlock_wake (int *__futex)
                              : "memory"); })
 
 
+#define lll_robust_mutex_cond_lock(futex, id) \
+  ({ int result, ignore;                                                     \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t"                              \
+                      "jnz _L_mutex_cond_lock_%=\n\t"                        \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_mutex_cond_lock_%=,@function\n"              \
+                      "_L_mutex_cond_lock_%=:\n\t"                           \
+                      "leal %2, %%ecx\n\t"                                   \
+                      "call __lll_robust_mutex_lock_wait\n\t"                \
+                      "jmp 1f\n\t"                                           \
+                      ".size _L_mutex_cond_lock_%=,.-_L_mutex_cond_lock_%=\n"\
+                      ".previous\n"                                          \
+                      "1:"                                                   \
+                      : "=a" (result), "=c" (ignore), "=m" (futex)           \
+                      : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex)       \
+                      : "memory");                                           \
+     result; })
+
+
 #define lll_mutex_timedlock(futex, timeout) \
   ({ int result, ignore1, ignore2;                                           \
      __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"                              \
@@ -208,9 +256,30 @@ extern int __lll_mutex_unlock_wake (int *__futex)
      result; })
 
 
+#define lll_robust_mutex_timedlock(futex, timeout, id) \
+  ({ int result, ignore1, ignore2;                                           \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t"                              \
+                      "jnz _L_mutex_timedlock_%=\n\t"                        \
+                      ".subsection 1\n\t"                                    \
+                      ".type _L_mutex_timedlock_%=,@function\n"              \
+                      "_L_mutex_timedlock_%=:\n\t"                           \
+                      "leal %3, %%ecx\n\t"                                   \
+                      "movl %7, %%edx\n\t"                                   \
+                      "call __lll_robust_mutex_timedlock_wait\n\t"           \
+                      "jmp 1f\n\t"                                           \
+                      ".size _L_mutex_timedlock_%=,.-_L_mutex_timedlock_%=\n"\
+                      ".previous\n"                                          \
+                      "1:"                                                   \
+                      : "=a" (result), "=c" (ignore1), "=&d" (ignore2),      \
+                        "=m" (futex)                                         \
+                      : "0" (0), "1" (id), "m" (futex), "m" (timeout)        \
+                      : "memory");                                           \
+     result; })
+
+
 #define lll_mutex_unlock(futex) \
   (void) ({ int ignore;                                                              \
-            __asm __volatile (LOCK_INSTR "subl $1,%0\n\t"                    \
+            __asm __volatile (LOCK_INSTR "subl $1, %0\n\t"                   \
                              "jne _L_mutex_unlock_%=\n\t"                    \
                              ".subsection 1\n\t"                             \
                              ".type _L_mutex_unlock_%=,@function\n"          \
@@ -226,6 +295,53 @@ extern int __lll_mutex_unlock_wake (int *__futex)
                              : "memory"); })
 
 
+#define lll_robust_mutex_unlock(futex) \
+  (void) ({ int ignore;                                                              \
+            __asm __volatile (LOCK_INSTR "andl %2, %0\n\t"                   \
+                             "jne _L_mutex_unlock_%=\n\t"                    \
+                             ".subsection 1\n\t"                             \
+                             ".type _L_mutex_unlock_%=,@function\n"          \
+                             "_L_mutex_unlock_%=:\n\t"                       \
+                             "leal %0, %%eax\n\t"                            \
+                             "call __lll_mutex_unlock_wake\n\t"              \
+                             "jmp 1f\n\t"                                    \
+                             ".size _L_mutex_unlock_%=,.-_L_mutex_unlock_%=\n" \
+                             ".previous\n"                                   \
+                             "1:"                                            \
+                             : "=m" (futex), "=&a" (ignore)                  \
+                             : "i" (FUTEX_TID_MASK), "m" (futex)             \
+                             : "memory"); })
+
+
+#define lll_robust_mutex_dead(futex) \
+  (void) ({ int __ignore;                                                    \
+           register int _nr asm ("edx") = 1;                                 \
+           __asm __volatile (LOCK_INSTR "orl %5, (%2)\n\t"                   \
+                             LLL_EBX_LOAD                                    \
+                             LLL_ENTER_KERNEL                                \
+                             LLL_EBX_LOAD                                    \
+                             : "=a" (__ignore)                               \
+                             : "0" (SYS_futex), LLL_EBX_REG (&(futex)),      \
+                               "c" (FUTEX_WAKE), "d" (_nr),                  \
+                               "i" (FUTEX_OWNER_DIED),                       \
+                               "i" (offsetof (tcbhead_t, sysinfo))); })
+
+
+#define lll_futex_wake(futex, nr) \
+  do {                                                                       \
+    int __ignore;                                                            \
+    register __typeof (nr) _nr asm ("edx") = (nr);                           \
+    __asm __volatile (LLL_EBX_LOAD                                           \
+                     LLL_ENTER_KERNEL                                        \
+                     LLL_EBX_LOAD                                            \
+                     : "=a" (__ignore)                                       \
+                     : "0" (SYS_futex), LLL_EBX_REG (futex),                 \
+                       "c" (FUTEX_WAKE), "d" (_nr),                          \
+                       "i" (0) /* phony, to align next arg's number */,      \
+                       "i" (offsetof (tcbhead_t, sysinfo)));                 \
+  } while (0)
+
+
 #define lll_mutex_islocked(futex) \
   (futex != 0)
 
index a13bb08..892769d 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/ia64/bits/pthreadtypes.h
@@ -43,6 +43,13 @@ typedef union
 } pthread_attr_t;
 
 
+typedef struct __pthread_internal_list
+{
+  struct __pthread_internal_list *__prev;
+  struct __pthread_internal_list *__next;
+} __pthread_list_t;
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is not exposed on purpose.  */
 typedef union
@@ -57,8 +64,7 @@ typedef union
        binary compatibility.  */
     int __kind;
     int __spins;
-    struct __pthread_mutex_s *__next;
-    struct __pthread_mutex_s *__prev;
+    __pthread_list_t __list;
 #define __PTHREAD_MUTEX_HAVE_PREV      1
   } __data;
   char __size[__SIZEOF_PTHREAD_MUTEX_T];
diff --git a/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym b/nptl/sysdeps/unix/sysv/linux/lowlevelrobustlock.sym
new file mode 100644 (file)
index 0000000..2f1e9da
--- /dev/null
@@ -0,0 +1,6 @@
+#include <stddef.h>
+#include <pthreadP.h>
+
+--
+
+TID            offsetof (struct pthread, tid)
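
lowlevelrobustlock.sym is input for glibc's gen-as-const machinery (enabled by the Makefile change above): each "NAME expression" line becomes a preprocessor constant in a generated header, which is how the new assembly files can write "movl %gs:TID, %edx".  Roughly, the generated lowlevelrobustlock.h contains a line like the following; the numeric value depends on the target's struct pthread layout and is purely illustrative:

    #define TID 72   /* offsetof (struct pthread, tid); value illustrative only */
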
index 493c2ab..a7150f6 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/powerpc/bits/pthreadtypes.h
@@ -58,6 +58,20 @@ typedef union
 } pthread_attr_t;
 
 
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+  struct __pthread_internal_list *__prev;
+  struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+  struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is deliberately not exposed.  */
 typedef union
@@ -75,15 +89,14 @@ typedef union
     int __kind;
 #if __WORDSIZE == 64
     int __spins;
-    struct __pthread_mutex_s *__next;
-    struct __pthread_mutex_s *__prev;
+    __pthread_list_t __list;
 # define __PTHREAD_MUTEX_HAVE_PREV     1
 #else
     unsigned int __nusers;
     __extension__ union
     {
       int __spins;
-      struct __pthread_mutex_s *__next;
+      __pthread_slist_t __list;
     };
 #endif
   } __data;
index 990db87..a97351f 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
+++ b/nptl/sysdeps/unix/sysv/linux/pthread_mutex_cond_lock.c
@@ -1,7 +1,8 @@
 #include <pthreadP.h>
 
-#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock(mutex)
-#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock(mutex)
+#define LLL_MUTEX_LOCK(mutex) lll_mutex_cond_lock (mutex)
+#define LLL_MUTEX_TRYLOCK(mutex) lll_mutex_cond_trylock (mutex)
+#define LLL_ROBUST_MUTEX_LOCK(mutex, id) lll_robust_mutex_cond_lock (mutex, id)
 #define __pthread_mutex_lock __pthread_mutex_cond_lock
 #define NO_INCR
 
index df8beeb..c77031d 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/s390/bits/pthreadtypes.h
@@ -57,6 +57,20 @@ typedef union
 } pthread_attr_t;
 
 
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+  struct __pthread_internal_list *__prev;
+  struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+  struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is not exposed on purpose.  */
 typedef union
@@ -74,15 +88,14 @@ typedef union
     int __kind;
 #if __WORDSIZE == 64
     int __spins;
-    struct __pthread_mutex_s *__next;
-    struct __pthread_mutex_s *__prev;
+    __pthread_list_t __list;
 # define __PTHREAD_MUTEX_HAVE_PREV     1
 #else
     unsigned int __nusers;
     __extension__ union
     {
       int __spins;
-      struct __pthread_mutex_s *__next;
+      __pthread_slist_t __list;
     };
 #endif
   } __data;
index 7642ecc..969686d 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/sh/bits/pthreadtypes.h
@@ -44,6 +44,12 @@ typedef union
 } pthread_attr_t;
 
 
+typedef struct __pthread_internal_slist
+{
+  struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is not exposed on purpose.  */
 typedef union
@@ -60,7 +66,7 @@ typedef union
     __extension__ union
     {
       int __spins;
-      struct __pthread_mutex_s *__next;
+      __pthread_slist_t __list;
     };
   } __data;
   char __size[__SIZEOF_PTHREAD_MUTEX_T];
index 7dc61c1..e734c12 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
+++ b/nptl/sysdeps/unix/sysv/linux/sparc/bits/pthreadtypes.h
@@ -58,6 +58,20 @@ typedef union
 } pthread_attr_t;
 
 
+#if __WORDSIZE == 64
+typedef struct __pthread_internal_list
+{
+  struct __pthread_internal_list *__prev;
+  struct __pthread_internal_list *__next;
+} __pthread_list_t;
+#else
+typedef struct __pthread_internal_slist
+{
+  struct __pthread_internal_slist *__next;
+} __pthread_slist_t;
+#endif
+
+
 /* Data structures for mutex handling.  The structure of the attribute
    type is deliberately not exposed.  */
 typedef union
@@ -75,15 +89,14 @@ typedef union
     int __kind;
 #if __WORDSIZE == 64
     int __spins;
-    struct __pthread_mutex_s *__next;
-    struct __pthread_mutex_s *__prev;
+    __pthread_list_t __list;
 # define __PTHREAD_MUTEX_HAVE_PREV     1
 #else
     unsigned int __nusers;
     __extension__ union
     {
       int __spins;
-      struct __pthread_mutex_s *__next;
+      __pthread_slist_t __list;
     };
 #endif
   } __data;
index ebcfe6e..f91ce9b 100644 (file)
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -111,6 +111,16 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
      ret; })
 
 
+#define lll_robust_mutex_trylock(futex, id)                                  \
+  ({ int ret;                                                                \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"                          \
+                      : "=a" (ret), "=m" (futex)                             \
+                      : "r" (id), "m" (futex),                               \
+                        "0" (LLL_MUTEX_LOCK_INITIALIZER)                     \
+                      : "memory");                                           \
+     ret; })
+
+
 #define lll_mutex_cond_trylock(futex) \
   ({ int ret;                                                                \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %1"                          \
@@ -139,6 +149,25 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
                              : "cx", "r11", "cc", "memory"); })
 
 
+#define lll_robust_mutex_lock(futex, id) \
+  ({ int result, ignore1, ignore2;                                   \
+    __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"                       \
+                     "jnz 1f\n\t"                                            \
+                     ".subsection 1\n"                                       \
+                     "1:\tleaq %2, %%rdi\n\t"                                \
+                     "subq $128, %%rsp\n\t"                                  \
+                     "callq __lll_robust_mutex_lock_wait\n\t"                \
+                     "addq $128, %%rsp\n\t"                                  \
+                     "jmp 2f\n\t"                                            \
+                     ".previous\n"                                           \
+                     "2:"                                                    \
+                     : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),        \
+                       "=a" (result)                                         \
+                     : "0" (id), "m" (futex), "3" (0)                        \
+                     : "cx", "r11", "cc", "memory");                         \
+    result; })
+
+
 #define lll_mutex_cond_lock(futex) \
   (void) ({ int ignore1, ignore2, ignore3;                                   \
            __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"                \
@@ -157,6 +186,25 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
                              : "cx", "r11", "cc", "memory"); })
 
 
+#define lll_robust_mutex_cond_lock(futex, id) \
+  ({ int result, ignore1, ignore2;                                           \
+    __asm __volatile (LOCK_INSTR "cmpxchgl %0, %2\n\t"                       \
+                     "jnz 1f\n\t"                                            \
+                     ".subsection 1\n"                                       \
+                     "1:\tleaq %2, %%rdi\n\t"                                \
+                     "subq $128, %%rsp\n\t"                                  \
+                     "callq __lll_robust_mutex_lock_wait\n\t"                \
+                     "addq $128, %%rsp\n\t"                                  \
+                     "jmp 2f\n\t"                                            \
+                     ".previous\n"                                           \
+                     "2:"                                                    \
+                     : "=S" (ignore1), "=&D" (ignore2), "=m" (futex),        \
+                       "=a" (result)                                         \
+                     : "0" (id | FUTEX_WAITERS), "m" (futex), "3" (0)        \
+                     : "cx", "r11", "cc", "memory");                         \
+    result; })
+
+
 #define lll_mutex_timedlock(futex, timeout) \
   ({ int result, ignore1, ignore2, ignore3;                                  \
      __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t"                              \
@@ -177,6 +225,26 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
      result; })
 
 
+#define lll_robust_mutex_timedlock(futex, timeout, id) \
+  ({ int result, ignore1, ignore2, ignore3;                                  \
+     __asm __volatile (LOCK_INSTR "cmpxchgl %2, %4\n\t"                              \
+                      "jnz 1f\n\t"                                           \
+                      ".subsection 1\n"                                      \
+                      "1:\tleaq %4, %%rdi\n\t"                               \
+                      "movq %8, %%rdx\n\t"                                   \
+                      "subq $128, %%rsp\n\t"                                 \
+                      "callq __lll_robust_mutex_timedlock_wait\n\t"          \
+                      "addq $128, %%rsp\n\t"                                 \
+                      "jmp 2f\n\t"                                           \
+                      ".previous\n"                                          \
+                      "2:"                                                   \
+                      : "=a" (result), "=&D" (ignore1), "=S" (ignore2),      \
+                        "=&d" (ignore3), "=m" (futex)                        \
+                      : "0" (0), "2" (id), "m" (futex), "m" (timeout)        \
+                      : "memory", "cx", "cc", "r10", "r11");                 \
+     result; })
+
+
 #define lll_mutex_unlock(futex) \
   (void) ({ int ignore;                                                              \
             __asm __volatile (LOCK_INSTR "decl %0\n\t"                       \
@@ -194,6 +262,34 @@ extern int __lll_mutex_unlock_wait (int *__futex) attribute_hidden;
                              : "ax", "cx", "r11", "cc", "memory"); })
 
 
+#define lll_robust_mutex_unlock(futex) \
+  (void) ({ int ignore;                                                              \
+           __asm __volatile (LOCK_INSTR "andl %2, %0\n\t"                    \
+                             "jne 1f\n\t"                                    \
+                             ".subsection 1\n"                               \
+                             "1:\tleaq %0, %%rdi\n\t"                        \
+                             "subq $128, %%rsp\n\t"                          \
+                             "callq __lll_mutex_unlock_wake\n\t"             \
+                             "addq $128, %%rsp\n\t"                          \
+                             "jmp 2f\n\t"                                    \
+                             ".previous\n"                                   \
+                             "2:"                                            \
+                             : "=m" (futex), "=&D" (ignore)                  \
+                             : "i" (FUTEX_TID_MASK), "m" (futex)             \
+                             : "ax", "cx", "r11", "cc", "memory"); })
+
+
+#define lll_robust_mutex_dead(futex) \
+  (void) ({ int ignore;                     \
+           __asm __volatile (LOCK_INSTR "orl %3, (%2)\n\t"                   \
+                             "syscall"                                       \
+                             : "=m" (futex), "=a" (ignore)                   \
+                             : "D" (&(futex)), "i" (FUTEX_OWNER_DIED),       \
+                               "S" (FUTEX_WAKE), "1" (__NR_futex),           \
+                               "d" (1)                                       \
+                             : "cx", "r11", "cc", "memory"); })
+
+
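(lll_robust_mutex_unlock drops ownership and, whenever a wake-up may be needed, reuses the existing __lll_mutex_unlock_wake helper; lll_robust_mutex_dead flags the futex word with FUTEX_OWNER_DIED and wakes one waiter so the next locker can observe the flag.  In portable terms the pair behaves roughly like the sketch below, written with GCC builtins and a raw futex(2) call rather than the exact instruction sequence of the asm above.)

#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
futex_wake_one (int *futex)
{
  syscall (SYS_futex, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
}

static void
robust_unlock_sketch (int *futex)
{
  /* Drop ownership; if a waiter announced itself, wake one.  */
  int oldval = __atomic_exchange_n (futex, 0, __ATOMIC_RELEASE);
  if (oldval & FUTEX_WAITERS)
    futex_wake_one (futex);
}

static void
robust_dead_sketch (int *futex)
{
  /* Mark the owner as dead and wake one waiter to recover the mutex.  */
  __atomic_fetch_or (futex, FUTEX_OWNER_DIED, __ATOMIC_RELEASE);
  futex_wake_one (futex);
}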
 #define lll_mutex_islocked(futex) \
   (futex != LLL_MUTEX_LOCK_INITIALIZER)
 
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
new file mode 100644 (file)
index 0000000..7bb9191
--- /dev/null
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
@@ -0,0 +1,192 @@
+/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307 USA.  */
+
+#include <sysdep.h>
+#include <pthread-errnos.h>
+#include <lowlevelrobustlock.h>
+
+       .text
+
+#ifndef LOCK
+# ifdef UP
+#  define LOCK
+# else
+#  define LOCK lock
+# endif
+#endif
+
+#define SYS_futex              202
+#define FUTEX_WAIT             0
+#define FUTEX_WAKE             1
+#define FUTEX_WAITERS          0x80000000
+#define FUTEX_OWNER_DIED       0x40000000
+
+/* For the calculation see asm/vsyscall.h.  */
+#define VSYSCALL_ADDR_vgettimeofday    0xffffffffff600000
+
+
+       .globl  __lll_robust_mutex_lock_wait
+       .type   __lll_robust_mutex_lock_wait,@function
+       .hidden __lll_robust_mutex_lock_wait
+       .align  16
+__lll_robust_mutex_lock_wait:
+       pushq   %r10
+       pushq   %rdx
+
+       xorq    %r10, %r10      /* No timeout.  */
+#if FUTEX_WAIT == 0
+       xorl    %esi, %esi
+#else
+       movl    $FUTEX_WAIT, %esi
+#endif
+
+4:     movl    %eax, %edx
+       orl     $FUTEX_WAITERS, %edx
+
+       testl   $FUTEX_OWNER_DIED, %eax
+       jnz     3f
+
+       cmpl    %edx, %eax
+       je      1f
+
+       LOCK
+       cmpxchgl %edx, (%rdi)
+       jnz     2f
+
+1:     movl    $SYS_futex, %eax
+       syscall
+
+       movl    (%rdi), %eax
+
+2:     testl   %eax, %eax
+       jne     4b
+
+       movl    %fs:TID, %edx
+       LOCK
+       cmpxchgl %edx, (%rdi)
+       jnz     4b
+       /* NB:   %rax == 0 */
+
+3:     popq    %rdx
+       popq    %r10
+       retq
+       .size   __lll_robust_mutex_lock_wait,.-__lll_robust_mutex_lock_wait
+
+
+       .globl  __lll_robust_mutex_timedlock_wait
+       .type   __lll_robust_mutex_timedlock_wait,@function
+       .hidden __lll_robust_mutex_timedlock_wait
+       .align  16
+__lll_robust_mutex_timedlock_wait:
+       /* Check for a valid timeout value.  */
+       cmpq    $1000000000, 8(%rdx)
+       jae     3f
+
+       pushq   %r8
+       pushq   %r9
+       pushq   %r12
+       pushq   %r13
+
+       /* Stack frame for the timespec and timeval structs.  */
+       subq    $24, %rsp
+
+       movq    %rdi, %r12
+       movq    %rdx, %r13
+
+1:     movq    %rax, 16(%rsp)
+
+       /* Get current time.  */
+       movq    %rsp, %rdi
+       xorl    %esi, %esi
+       movq    $VSYSCALL_ADDR_vgettimeofday, %rax
+       /* This is a regular function call, all caller-save registers
+          might be clobbered.  */
+       callq   *%rax
+
+       /* Compute relative timeout.  */
+       movq    8(%rsp), %rax
+       movl    $1000, %edi
+       mul     %rdi            /* Milli seconds to nano seconds.  */
+       movq    (%r13), %rdi
+       movq    8(%r13), %rsi
+       subq    (%rsp), %rdi
+       subq    %rax, %rsi
+       jns     4f
+       addq    $1000000000, %rsi
+       decq    %rdi
+4:     testq   %rdi, %rdi
+       js      8f              /* Time is already up.  */
+
+       /* Futex call.  */
+       movq    %rdi, (%rsp)    /* Store relative timeout.  */
+       movq    %rsi, 8(%rsp)
+
+       movq    16(%rsp), %rdx
+       movl    %edx, %eax
+       orl     $FUTEX_WAITERS, %edx
+
+       testl   $FUTEX_OWNER_DIED, %eax
+       jnz     6f
+
+       cmpl    %eax, %edx
+       je      2f
+
+       LOCK
+       cmpxchgl %edx, (%r12)
+       movq    $0, %rcx        /* Must use mov to avoid changing cc.  */
+       jnz     5f
+
+2:     movq    %rsp, %r10
+#if FUTEX_WAIT == 0
+       xorl    %esi, %esi
+#else
+       movl    $FUTEX_WAIT, %esi
+#endif
+       movq    %r12, %rdi
+       movl    $SYS_futex, %eax
+       syscall
+       movq    %rax, %rcx
+
+       movl    (%r12), %eax
+
+5:     testl   %eax, %eax
+       jne     7f
+
+       movl    %fs:TID, %edx
+       LOCK
+       cmpxchgl %edx, (%r12)
+       jnz     7f
+
+6:     addq    $24, %rsp
+       popq    %r13
+       popq    %r12
+       popq    %r9
+       popq    %r8
+       retq
+
+       /* Check whether the time expired.  */
+7:     cmpq    $-ETIMEDOUT, %rcx
+       jne     1b
+
+8:     movl    $ETIMEDOUT, %eax
+       jmp     6b
+
+3:     movl    $EINVAL, %eax
+       retq
+       .size   __lll_robust_mutex_timedlock_wait,.-__lll_robust_mutex_timedlock_wait
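(The loop in __lll_robust_mutex_lock_wait above corresponds roughly to the following portable C; a sketch using GCC builtins and a raw futex(2) call, where tid stands for the value the asm reads from %fs:TID.  __lll_robust_mutex_timedlock_wait runs the same loop but first turns the absolute timeout into a relative one via the gettimeofday vsyscall, as sketched in relative_timeout_sketch.)

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

static int
robust_lock_wait_sketch (int *futex, int tid)
{
  int oldval = *futex;

  do
    {
      /* A dead owner is reported back so the caller can recover the
         mutex and return EOWNERDEAD.  */
      if (oldval & FUTEX_OWNER_DIED)
        return oldval;

      /* Make sure FUTEX_WAITERS is set so the unlocker will wake us,
         then sleep while the word still holds that value.  */
      int newval = oldval | FUTEX_WAITERS;
      if (oldval == newval
          || __atomic_compare_exchange_n (futex, &oldval, newval, 0,
                                          __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
        syscall (SYS_futex, futex, FUTEX_WAIT, newval, NULL, NULL, 0);

      /* Try to take a free lock: 0 -> tid.  On failure oldval is
         refreshed with the current value and the loop repeats.  */
      oldval = 0;
    }
  while (!__atomic_compare_exchange_n (futex, &oldval, tid, 0,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
  return 0;
}

/* Absolute-to-relative timeout conversion as done in the timed variant.  */
static int
relative_timeout_sketch (const struct timespec *abstime, struct timespec *rel)
{
  struct timeval tv;
  gettimeofday (&tv, NULL);

  rel->tv_sec = abstime->tv_sec - tv.tv_sec;
  rel->tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (rel->tv_nsec < 0)
    {
      rel->tv_nsec += 1000000000;
      --rel->tv_sec;
    }
  return rel->tv_sec < 0 ? ETIMEDOUT : 0;      /* Already expired?  */
}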
index ece8d69..4a76335 100644 (file)
--- a/sysdeps/unix/sysv/linux/not-cancel.h
+++ b/sysdeps/unix/sysv/linux/not-cancel.h
    INLINE_SYSCALL (open, 2, (const char *) (name), (flags))
 
 /* Uncancelable openat.  */
-extern int __openat_not_cancel (int fd, const char *fname, int oflag,
+#if !defined NOT_IN_libc || defined IS_IN_libpthread || defined IS_IN_librt
+extern int __openat_nocancel (int fd, const char *fname, int oflag,
+                             mode_t mode) attribute_hidden;
+extern int __openat64_nocancel (int fd, const char *fname, int oflag,
                                mode_t mode) attribute_hidden;
+#else
+# define __openat_nocancel(fd, fname, oflag, mode) \
+  openat (fd, fname, oflag, mode)
+# define __openat64_nocancel(fd, fname, oflag, mode) \
+  openat64 (fd, fname, oflag, mode)
+#endif
+
 #define openat_not_cancel(fd, fname, oflag, mode) \
-  __openat_not_cancel (fd, fname, oflag, mode)
+  __openat_nocancel (fd, fname, oflag, mode)
 #define openat_not_cancel_3(fd, fname, oflag) \
-  __openat_not_cancel (fd, fname, oflag, 0)
-extern int __openat64_not_cancel (int fd, const char *fname, int oflag,
-                                 mode_t mode) attribute_hidden;
+  __openat_nocancel (fd, fname, oflag, 0)
 #define openat64_not_cancel(fd, fname, oflag, mode) \
-  __openat64_not_cancel (fd, fname, oflag, mode)
+  __openat64_nocancel (fd, fname, oflag, mode)
 #define openat64_not_cancel_3(fd, fname, oflag) \
-  __openat64_not_cancel (fd, fname, oflag, 0)
+  __openat64_nocancel (fd, fname, oflag, 0)
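(The effect of the not-cancel.h change: when built into libc, libpthread or librt the openat*_not_cancel macros keep calling the hidden __openat_nocancel/__openat64_nocancel entry points, while any other user falls back to plain openat/openat64, accepting that those are cancellation points there.  A hypothetical caller, for illustration only:)

#include <fcntl.h>

/* open_config_at and the "conf" name are made up for this example.  */
static int
open_config_at (int dfd)
{
  /* Expands to __openat_nocancel (dfd, "conf", O_RDONLY, 0) inside
     libc/libpthread/librt, and to openat (dfd, "conf", O_RDONLY, 0)
     elsewhere.  */
  return openat_not_cancel_3 (dfd, "conf", O_RDONLY);
}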
 
 /* Uncancelable close.  */
 #define close_not_cancel(fd) \