/* Copyright (C) 2002-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>
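
/* LIBC_PROBE marks SystemTap probe points; unless glibc is built with
   systemtap support it compiles to nothing.  */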

int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block,
     abstime must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
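      /* __builtin_expect biases the dispatch toward
         PTHREAD_MUTEX_TIMED_NP, the default and by far the most common
         mutex type.  */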

      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);
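
          /* __spins is an exponentially weighted moving average of
             past spin counts: each acquisition moves it one eighth of
             the way toward the count just observed.  */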
          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);
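
      /* Publishing the mutex in list_op_pending first means that if
         this thread dies in the middle of the operation, the kernel's
         robust-futex cleanup can still find the mutex and set
         FUTEX_OWNER_DIED so another thread can recover it.  */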

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);
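
      /* Once here the lock is held and no dead owner is pending:
         record the acquisition on the robust list and clear the
         pending marker.  */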
      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;
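
      /* For the priority-inheritance types, contention is handed to
         the kernel: FUTEX_LOCK_PI boosts the owner to the waiter's
         priority until it unlocks.  */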

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));
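
        /* That low bit tells the kernel's robust-list walker that the
           entry is a PI futex, so recovery after a dying owner must go
           through the PI state machine instead of a plain store.  */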

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
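
        /* Fast path: an uncontended PI mutex is acquired entirely in
           user space by installing our TID in the lock word; only on
           contention is the kernel involved below.  */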
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI takes an absolute
               CLOCK_REALTIME timeout, so abstime can be passed
               through unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);
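
                    /* Neither EDEADLK nor ESRCH may be reported to the
                       caller for these mutex kinds; the lock can simply
                       never be acquired, so the call must wait out the
                       timeout instead.  */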

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;
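
      /* Priority-protected (PTHREAD_PRIO_PROTECT) mutexes are handled
         entirely in user space: __pthread_tpp_change_priority raises
         this thread to the ceiling priority before the lock word is
         touched.  */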

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }
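
        /* The lock word encodes the priority ceiling in the
           PTHREAD_MUTEX_PRIO_CEILING_MASK bits and the lock state in
           the low bits: ceilval is unlocked, ceilval | 1 locked and
           ceilval | 2 locked with waiters.  */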
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);
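
        /* Both retry loops exit only after the CAS installed our
           locked value with the ceiling unchanged, so the lock is now
           held.  */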
        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}