Refactor PI mutexes internal definitions
author Adhemerval Zanella <adhemerval.zanella@linaro.org>
Wed, 30 Oct 2019 16:29:40 +0000 (13:29 -0300)
committer Adhemerval Zanella <adhemerval.zanella@linaro.org>
Thu, 31 Oct 2019 14:09:10 +0000 (11:09 -0300)
This patch adds the generic futex_lock_pi and futex_unlock_pi helpers to
wrap the syscall machinery required to issue the futex syscalls.  It
slightly simplifies the futex code required to implement PI mutexes.

No functional changes, checked on x86_64-linux-gnu.
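
For illustration only (not part of the patch): a minimal standalone sketch
of the kind of kernel probe prio_inherit_missing performs through the new
futex_unlock_pi wrapper, i.e. issuing FUTEX_UNLOCK_PI on a dummy futex word
and treating ENOSYS as "PI futexes unsupported".  The probe_pi_futex_support
name and the direct use of syscall/SYS_futex are assumptions made for the
sketch; they are not glibc internals.

  #include <errno.h>
  #include <linux/futex.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Probe FUTEX_UNLOCK_PI on a futex word we do not own.  On kernels with
     PI-futex support the call fails with EPERM; without support it fails
     with ENOSYS.  */
  static int
  probe_pi_futex_support (void)
  {
    unsigned int futex_word = 0;
    long ret = syscall (SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL);
    return !(ret == -1 && errno == ENOSYS);
  }

  int
  main (void)
  {
    printf ("PI futexes %ssupported\n",
            probe_pi_futex_support () ? "" : "not ");
    return 0;
  }

On an unsupported kernel the raw syscall fails with ENOSYS, which is what
prio_inherit_missing maps to tpi_supported == -1 in the first hunk below.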

Reviewed-by: Carlos O'Donell <carlos@redhat.com>
nptl/pthread_mutex_init.c
nptl/pthread_mutex_lock.c
nptl/pthread_mutex_timedlock.c
nptl/pthread_mutex_trylock.c
nptl/pthread_mutex_unlock.c
sysdeps/nptl/futex-internal.h
sysdeps/nptl/lowlevellock-futex.h

index fe4eeee..20800b8 100644
--- a/nptl/pthread_mutex_init.c
+++ b/nptl/pthread_mutex_init.c
@@ -24,6 +24,7 @@
 #include "pthreadP.h"
 #include <atomic.h>
 #include <pthread-offsets.h>
+#include <futex-internal.h>
 
 #include <stap-probe.h>
 
@@ -37,19 +38,13 @@ static const struct pthread_mutexattr default_mutexattr =
 static bool
 prio_inherit_missing (void)
 {
-#ifdef __NR_futex
   static int tpi_supported;
-  if (__glibc_unlikely (tpi_supported == 0))
+  if (__glibc_unlikely (atomic_load_relaxed (&tpi_supported) == 0))
     {
-      int lock = 0;
-      INTERNAL_SYSCALL_DECL (err);
-      int ret = INTERNAL_SYSCALL (futex, err, 4, &lock, FUTEX_UNLOCK_PI, 0, 0);
-      assert (INTERNAL_SYSCALL_ERROR_P (ret, err));
-      tpi_supported = INTERNAL_SYSCALL_ERRNO (ret, err) == ENOSYS ? -1 : 1;
+      int e = futex_unlock_pi (&(unsigned int){0}, 0);
+      atomic_store_relaxed (&tpi_supported, e == ENOSYS ? -1 : 1);
     }
   return __glibc_unlikely (tpi_supported < 0);
-#endif
-  return true;
 }
 
 int
index ace436d..05bba50 100644
--- a/nptl/pthread_mutex_lock.c
+++ b/nptl/pthread_mutex_lock.c
@@ -24,7 +24,7 @@
 #include <not-cancel.h>
 #include "pthreadP.h"
 #include <atomic.h>
-#include <lowlevellock.h>
+#include <futex-internal.h>
 #include <stap-probe.h>
 
 #ifndef lll_lock_elision
@@ -416,21 +416,16 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
-           INTERNAL_SYSCALL_DECL (__err);
-           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                                     __lll_private_flag (FUTEX_LOCK_PI,
-                                                         private), 1, 0);
-
-           if (INTERNAL_SYSCALL_ERROR_P (e, __err)
-               && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
-                   || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
+           int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
+                                  NULL, private);
+           if (e == ESRCH || e == EDEADLK)
              {
-               assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
+               assert (e != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
-               assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);
+               assert (e != ESRCH || !robust);
 
                /* Delay the thread indefinitely.  */
                while (1)
@@ -479,11 +474,8 @@ __pthread_mutex_lock_full (pthread_mutex_t *mutex)
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;
 
-           INTERNAL_SYSCALL_DECL (__err);
-           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                             __lll_private_flag (FUTEX_UNLOCK_PI,
-                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
-                             0, 0);
+           futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
index 76b93bd..eb4baae 100644
--- a/nptl/pthread_mutex_timedlock.c
+++ b/nptl/pthread_mutex_timedlock.c
@@ -25,6 +25,7 @@
 #include <atomic.h>
 #include <lowlevellock.h>
 #include <not-cancel.h>
+#include <futex-internal.h>
 
 #include <stap-probe.h>
 
@@ -377,39 +378,29 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
-           INTERNAL_SYSCALL_DECL (__err);
-
-           int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                                     __lll_private_flag (FUTEX_LOCK_PI,
-                                                         private), 1,
-                                     abstime);
-           if (INTERNAL_SYSCALL_ERROR_P (e, __err))
+           int e = futex_lock_pi ((unsigned int *) &mutex->__data.__lock,
+                                  abstime, private);
+           if (e == ETIMEDOUT)
+             return ETIMEDOUT;
+           else if (e == ESRCH || e == EDEADLK)
              {
-               if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
-                 return ETIMEDOUT;
-
-               if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
-                   || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
-                 {
-                   assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
-                           || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
-                               && kind != PTHREAD_MUTEX_RECURSIVE_NP));
-                   /* ESRCH can happen only for non-robust PI mutexes where
-                      the owner of the lock died.  */
-                   assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
-                           || !robust);
-
-                   /* Delay the thread until the timeout is reached.
-                      Then return ETIMEDOUT.  */
-                   do
-                     e = lll_timedwait (&(int){0}, 0, clockid, abstime,
-                                        private);
-                   while (e != ETIMEDOUT);
-                   return ETIMEDOUT;
-                 }
-
-               return INTERNAL_SYSCALL_ERRNO (e, __err);
+               assert (e != EDEADLK
+                       || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
+                          && kind != PTHREAD_MUTEX_RECURSIVE_NP));
+               /* ESRCH can happen only for non-robust PI mutexes where
+                  the owner of the lock died.  */
+               assert (e != ESRCH || !robust);
+
+               /* Delay the thread until the timeout is reached. Then return
+                  ETIMEDOUT.  */
+               do
+                 e = lll_timedwait (&(int){0}, 0, clockid, abstime,
+                                    private);
+               while (e != ETIMEDOUT);
+               return ETIMEDOUT;
              }
+           else if (e != 0)
+             return e;
 
            oldval = mutex->__data.__lock;
 
@@ -447,11 +438,8 @@ __pthread_mutex_clocklock_common (pthread_mutex_t *mutex,
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;
 
-           INTERNAL_SYSCALL_DECL (__err);
-           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                             __lll_private_flag (FUTEX_UNLOCK_PI,
-                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
-                             0, 0);
+           futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
index 87e87c0..d24bb58 100644
--- a/nptl/pthread_mutex_trylock.c
+++ b/nptl/pthread_mutex_trylock.c
@@ -21,6 +21,7 @@
 #include <stdlib.h>
 #include "pthreadP.h"
 #include <lowlevellock.h>
+#include <futex-internal.h>
 
 #ifndef lll_trylock_elision
 #define lll_trylock_elision(a,t) lll_trylock(a)
@@ -346,11 +347,8 @@ __pthread_mutex_trylock (pthread_mutex_t *mutex)
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;
 
-           INTERNAL_SYSCALL_DECL (__err);
-           INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
-                             __lll_private_flag (FUTEX_UNLOCK_PI,
-                                                 PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
-                             0, 0);
+           futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
 
            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
index 71038f9..53f8b86 100644
--- a/nptl/pthread_mutex_unlock.c
+++ b/nptl/pthread_mutex_unlock.c
@@ -22,6 +22,7 @@
 #include "pthreadP.h"
 #include <lowlevellock.h>
 #include <stap-probe.h>
+#include <futex-internal.h>
 
 #ifndef lll_unlock_elision
 #define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; })
@@ -277,9 +278,8 @@ __pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr)
          if (((l & FUTEX_WAITERS) != 0)
              || (l != THREAD_GETMEM (THREAD_SELF, tid)))
            {
-             INTERNAL_SYSCALL_DECL (__err);
-             INTERNAL_SYSCALL (futex, __err, 2, &mutex->__data.__lock,
-                               __lll_private_flag (FUTEX_UNLOCK_PI, private));
+             futex_unlock_pi ((unsigned int *) &mutex->__data.__lock,
+                              private);
              break;
            }
        }
index 7692146..4feb772 100644
--- a/sysdeps/nptl/futex-internal.h
+++ b/sysdeps/nptl/futex-internal.h
@@ -381,4 +381,90 @@ futex_wake (unsigned int* futex_word, int processes_to_wake, int private)
     }
 }
 
+/* The operation checks the value of the futex: if the value is 0, it is
+   atomically set to the caller's thread ID.  If the futex value is nonzero,
+   the kernel atomically sets the FUTEX_WAITERS bit, which signals the futex
+   owner that it cannot unlock the futex in user space atomically by setting
+   its value to 0.
+
+   If more than one wait operation is issued, the enqueueing of the waiters
+   is done in descending priority order.
+
+   The ABSTIME argument provides an absolute timeout (measured against the
+   CLOCK_REALTIME clock).  If ABSTIME is NULL, the operation blocks
+   indefinitely.
+
+   Returns:
+
+     - 0 if woken by a PI unlock operation or spuriously.
+     - EAGAIN if the futex owner thread ID is about to exit, but has not yet
+       handled the state cleanup.
+     - EDEADLK if the futex is already locked by the caller.
+     - ESRCH if the thread ID in the futex does not exist.
+     - EINVAL if the state is corrupted or if there is a waiter on the
+       futex.
+     - ETIMEDOUT if the ABSTIME expires.
+*/
+static __always_inline int
+futex_lock_pi (unsigned int *futex_word, const struct timespec *abstime,
+              int private)
+{
+  int err = lll_futex_timed_lock_pi (futex_word, abstime, private);
+  switch (err)
+    {
+    case 0:
+    case -EAGAIN:
+    case -EINTR:
+    case -ETIMEDOUT:
+    case -ESRCH:
+    case -EDEADLK:
+    case -EINVAL: /* This indicates either state corruption or that the kernel
+                    found a waiter on the futex address which is waiting via
+                    FUTEX_WAIT or FUTEX_WAIT_BITSET.  This is reported by
+                    some futex_lock_pi usages (pthread_mutex_timedlock for
+                    instance).  */
+      return -err;
+
+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
+    case -ENOSYS: /* Must have been caused by a glibc bug.  */
+    /* No other errors are documented at this time.  */
+    default:
+      futex_fatal_error ();
+    }
+}
+
+/* Wakes the top priority waiter that called a futex_lock_pi operation on
+   the futex.
+
+   Returns the same values as futex_lock_pi under those same conditions;
+   additionally, returns EPERM when the caller is not allowed to attach
+   itself to the futex.  */
+static __always_inline int
+futex_unlock_pi (unsigned int *futex_word, int private)
+{
+  int err = lll_futex_timed_unlock_pi (futex_word, private);
+  switch (err)
+    {
+    case 0:
+    case -EAGAIN:
+    case -EINTR:
+    case -ETIMEDOUT:
+    case -ESRCH:
+    case -EDEADLK:
+    case -ENOSYS:
+    case -EPERM:  /* The caller is not allowed to attach itself to the futex.
+                     Used to check if PI futexes are supported by the
+                     kernel.  */
+      return -err;
+
+    case -EINVAL: /* Either due to wrong alignment or due to the timeout not
+                    being normalized.  Must have been caused by a glibc or
+                    application bug.  */
+    case -EFAULT: /* Must have been caused by a glibc or application bug.  */
+    /* No other errors are documented at this time.  */
+    default:
+      futex_fatal_error ();
+    }
+}
+
 #endif  /* futex-internal.h */
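
For illustration only (not part of the patch): a hedged, standalone sketch of
the lock/unlock round trip that futex_lock_pi and futex_unlock_pi wrap.  The
pi_lock/pi_unlock names and the direct syscall usage are assumptions for the
sketch; the raw syscall reports errors via errno, whereas the new wrappers
return the error as a positive value (or call futex_fatal_error).

  #include <errno.h>
  #include <linux/futex.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static unsigned int futex_word;      /* 0 means unlocked.  */

  /* FUTEX_LOCK_PI: on success the kernel stores the caller's TID in
     futex_word.  Passing a non-NULL struct timespec * instead of the final
     NULL would supply an absolute CLOCK_REALTIME timeout, matching the
     ABSTIME parameter documented above.  */
  static int
  pi_lock (void)
  {
    return syscall (SYS_futex, &futex_word, FUTEX_LOCK_PI, 0, NULL) == -1
           ? errno : 0;
  }

  /* FUTEX_UNLOCK_PI: only valid while futex_word holds the caller's TID.  */
  static int
  pi_unlock (void)
  {
    return syscall (SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL) == -1
           ? errno : 0;
  }

  int
  main (void)
  {
    int e = pi_lock ();
    printf ("lock:   %s (futex word %u)\n", strerror (e), futex_word);
    e = pi_unlock ();
    printf ("unlock: %s (futex word %u)\n", strerror (e), futex_word);
    return 0;
  }
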
index 392277e..ff0fd4e 100644
--- a/sysdeps/nptl/lowlevellock-futex.h
+++ b/sysdeps/nptl/lowlevellock-futex.h
 
 
 /* Priority Inheritance support.  */
+#define lll_futex_timed_lock_pi(futexp, abstime, private)              \
+  lll_futex_syscall (4, futexp,                                                \
+                    __lll_private_flag (FUTEX_LOCK_PI, private),       \
+                    0, abstime)
+
+#define lll_futex_timed_unlock_pi(futexp, private)                     \
+  lll_futex_syscall (4, futexp,                                                \
+                    __lll_private_flag (FUTEX_UNLOCK_PI, private),     \
+                    0, 0)
 
 /* Like lll_futex_wait (FUTEXP, VAL, PRIVATE) but with the expectation
    that lll_futex_cmp_requeue_pi (FUTEXP, _, _, MUTEX, _, PRIVATE) will