/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include <sys/param.h>
#include <sys/time.h>
#include "pthreadP.h"
#include <atomic.h>
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>
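
/* On architectures without lock elision these fallbacks map the elision
   entry points onto the plain low-level lock operations.  */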
#ifndef lll_clocklock_elision
#define lll_clocklock_elision(futex, adapt_count, clockid, abstime, private) \
  lll_clocklock (futex, clockid, abstime, private)
#endif

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif

int
__pthread_mutex_timedlock (pthread_mutex_t *mutex,
                           const struct timespec *abstime)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  /* See concurrency notes regarding mutex type which is loaded from __kind
     in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h.  */
  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
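      /* The second argument to __builtin_expect above biases code layout
         toward PTHREAD_MUTEX_TIMED_NP, the default mutex type and the
         common case.  */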
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_clocklock (mutex->__data.__lock, CLOCK_REALTIME, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;
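
      /* POSIX specifies that the ABSTIME argument of pthread_mutex_timedlock
         is measured against CLOCK_REALTIME, hence the explicit clock
         argument passed to lll_clocklock throughout this function.  */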

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__glibc_unlikely (mutex->__data.__owner == id))
        return EDEADLK;

      /* Don't do lock elision on an error checking mutex.  */
      goto simple;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      result = lll_clocklock (mutex->__data.__lock, CLOCK_REALTIME, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;
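
      /* Note that the elision path below returns directly and thus skips
         the __owner/__nusers bookkeeping at the end of this function; an
         elided acquisition does not really own the lock.  */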

    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      /* Don't record ownership */
      return lll_clocklock_elision (mutex->__data.__lock,
                                    mutex->__data.__spins,
                                    CLOCK_REALTIME, abstime,
                                    PTHREAD_MUTEX_PSHARED (mutex));

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (max_adaptive_count (),
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_clocklock (mutex->__data.__lock,
                                          CLOCK_REALTIME, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }
              atomic_spin_nop ();
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
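
          /* Learn from this acquisition: fold CNT into an exponentially
             weighted moving average (weight 1/8) so future attempts spin
             for roughly as long as recent contention required.  */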
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
      /* We need to set op_pending before starting the operation.  Also
         see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");

      oldval = mutex->__data.__lock;
      /* This is set to FUTEX_WAITERS iff we might have shared the
         FUTEX_WAITERS flag with other threads, and therefore need to keep it
         set to avoid lost wake-ups.  We have the same requirement in the
         simple mutex algorithm.  */
      unsigned int assume_other_futex_waiters = 0;
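      /* The loop below implements the usual futex locking protocol: try to
         CAS the lock word from 0 to our TID; if the previous owner died,
         take the mutex over; otherwise set FUTEX_WAITERS and block on the
         futex, retrying until the CAS succeeds or the timeout expires.  */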
      while (1)
        {
          /* Try to acquire the lock through a CAS from 0 (not acquired) to
             our TID | assume_other_futex_waiters.  */
          if (__glibc_likely (oldval == 0))
            {
              oldval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                    id | assume_other_futex_waiters, 0);
              if (__glibc_likely (oldval == 0))
                break;
            }

          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS)
                  | assume_other_futex_waiters;

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  continue;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              /* We must not enqueue the mutex before we have acquired it.
                 Also see comments at ENQUEUE_MUTEX.  */
              __asm ("" ::: "memory");
              ENQUEUE_MUTEX (mutex);
              /* We need to clear op_pending after we enqueue the mutex.  */
              __asm ("" ::: "memory");
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  Also see comments at ENQUEUE_MUTEX.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  /* We do not need to ensure ordering wrt another memory
                     access.  */
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          /* We are about to block; check whether the timeout is invalid.  */
          if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
            return EINVAL;
          /* Work around the fact that the kernel rejects negative timeout
             values despite them being valid.  */
          if (__glibc_unlikely (abstime->tv_sec < 0))
            return ETIMEDOUT;

          /* We cannot acquire the mutex nor has its owner died.  Thus, try
             to block using futexes.  Set FUTEX_WAITERS if necessary so that
             other threads are aware that there are potentially threads
             blocked on the futex.  Restart if oldval changed in the
             meantime.  */
          if ((oldval & FUTEX_WAITERS) == 0)
            {
              if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock,
                                                        oldval | FUTEX_WAITERS,
                                                        oldval)
                  != 0)
                {
                  oldval = mutex->__data.__lock;
                  continue;
                }
              oldval |= FUTEX_WAITERS;
            }

          /* It is now possible that we share the FUTEX_WAITERS flag with
             another thread; therefore, update assume_other_futex_waiters so
             that we do not forget about this when handling other cases
             above and thus do not cause lost wake-ups.  */
          assume_other_futex_waiters |= FUTEX_WAITERS;

          /* Block using the futex.  */
          int err = lll_futex_clock_wait_bitset (&mutex->__data.__lock,
              oldval, CLOCK_REALTIME, abstime,
              PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
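          /* The lll_futex_* wrappers return zero or a negative error code,
             so a timeout is reported here as -ETIMEDOUT.  */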
          /* The futex call timed out.  */
          if (err == -ETIMEDOUT)
            return -err;
          /* Reload current lock value.  */
          oldval = mutex->__data.__lock;
        }

      /* We have acquired the mutex; check if it is still consistent.  */
      if (__builtin_expect (mutex->__data.__owner
                            == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
        {
          /* This mutex is now not recoverable.  */
          mutex->__data.__count = 0;
          int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex);
          lll_unlock (mutex->__data.__lock, private);
          /* FIXME This violates the mutex destruction requirements.  See
             __pthread_mutex_unlock_full.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          return ENOTRECOVERABLE;
        }

      mutex->__data.__count = 1;
      /* We must not enqueue the mutex before we have acquired it.
         Also see comments at ENQUEUE_MUTEX.  */
      __asm ("" ::: "memory");
      ENQUEUE_MUTEX (mutex);
      /* We need to clear op_pending after we enqueue the mutex.  */
      __asm ("" ::: "memory");
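      /* The __asm statements above are compiler barriers only: the kernel
         reads the robust list in the context of this thread at thread exit,
         so no stronger hardware ordering is needed.  */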
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind, robust;
        {
          /* See concurrency notes regarding __kind in struct __pthread_mutex_s
             in sysdeps/nptl/bits/thread-shared-types.h.  */
          int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind));
          kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP;
          robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;
        }

        if (robust)
          {
            /* Note: robust PI futexes are signaled by setting bit 0.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                           (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                     | 1));
            /* We need to set op_pending before starting the operation.  Also
               see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
          }

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* We do not need to ensure ordering wrt another memory
                   access.  */
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI expects the timeout as an
               absolute CLOCK_REALTIME value, so ABSTIME is passed to the
               kernel unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (__nanosleep_nocancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            /* To the kernel, this will be visible after the kernel has
               acquired the mutex in the syscall.  */
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            /* We must not enqueue the mutex before we have acquired it.
               Also see comments at ENQUEUE_MUTEX.  */
            __asm ("" ::: "memory");
            ENQUEUE_MUTEX_PI (mutex);
            /* We need to clear op_pending after we enqueue the mutex.  */
            __asm ("" ::: "memory");
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
#endif  /* __NR_futex.  */
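
      /* In the PI cases above the kernel maintains the FUTEX_WAITERS bit
         and updates the lock word itself on FUTEX_LOCK_PI, so userspace
         performs no waiter bookkeeping of its own.  */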

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        /* See concurrency notes regarding __kind in struct __pthread_mutex_s
           in sysdeps/nptl/bits/thread-shared-types.h.  */
        int kind = atomic_load_relaxed (&(mutex->__data.__kind))
          & PTHREAD_MUTEX_KIND_MASK_NP;
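
        /* For priority-protected mutexes the lock word encodes both the
           ceiling (PTHREAD_MUTEX_PRIO_CEILING_MASK) and the lock state in
           the low bits: CEILVAL means unlocked, CEILVAL | 1 locked, and
           CEILVAL | 2 locked with potential waiters.  */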

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
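        /* The owning thread keeps running at the ceiling priority set up
           by __pthread_tpp_change_priority above; pthread_mutex_unlock is
           responsible for undoing the boost.  */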
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}
weak_alias (__pthread_mutex_timedlock, pthread_mutex_timedlock)