Add systemtap static probe points in generic and x86_64 pthread code.

nptl/pthread_mutex_timedlock.c  (platform/upstream/glibc.git)
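
For context, the following caller-side example (illustrative only, not part of the patch or of the file below) shows the interface this file implements: the deadline is an absolute CLOCK_REALTIME timestamp, and with this change the mutex_timedlock_entry and mutex_timedlock_acquired probes fire on entry and on successful acquisition, where a systemtap script can attach to them.

/* Illustrative caller: lock MUTEX but give up after two seconds.
   pthread_mutex_timedlock takes an absolute CLOCK_REALTIME deadline,
   not a relative timeout.  */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

int
main (void)
{
  struct timespec deadline;
  clock_gettime (CLOCK_REALTIME, &deadline);
  deadline.tv_sec += 2;

  int err = pthread_mutex_timedlock (&mutex, &deadline);
  if (err == 0)
    {
      puts ("acquired");
      pthread_mutex_unlock (&mutex);
    }
  else if (err == ETIMEDOUT)
    puts ("timed out");
  else
    printf ("unexpected error: %d\n", err);
  return 0;
}

Link with -lpthread (and, on older glibc, -lrt for clock_gettime).
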
/* Copyright (C) 2002-2012 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <time.h>
#include "pthreadP.h"
#include <lowlevellock.h>
#include <not-cancel.h>

#include <stap-probe.h>


int
pthread_mutex_timedlock (mutex, abstime)
     pthread_mutex_t *mutex;
     const struct timespec *abstime;
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);
  int result = 0;

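  /* Systemtap static probe point (see stap-probe.h): fires on entry
     with the mutex and the absolute timeout as arguments.  */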
  LIBC_PROBE (mutex_timedlock_entry, 2, mutex, abstime);

  /* We must not check ABSTIME here.  If the thread does not block,
     ABSTIME must not be checked for a valid value.  */

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          goto out;
        }

      /* We have to get the mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));

      if (result != 0)
        goto out;

      /* Only locked once so far.  */
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
      result = lll_timedlock (mutex->__data.__lock, abstime,
                              PTHREAD_MUTEX_PSHARED (mutex));
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
      if (! __is_smp)
        goto simple;

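      /* Adaptive mutex: spin in user space for a bounded number of
         trylock attempts before falling back to the kernel futex wait.
         __spins keeps a running estimate of how long the lock is
         typically held, so the spin limit adapts to the workload.  */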
      if (lll_trylock (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  result = lll_timedlock (mutex->__data.__lock, abstime,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (lll_trylock (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
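      /* Robust mutex: record the mutex in this thread's pending-operation
         slot before touching the lock word.  If the thread dies while the
         operation is in flight, the kernel consults this slot and marks
         the mutex with FUTEX_OWNER_DIED for the next locker.  */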
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);
              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                  return 0;
                }
            }

          result = lll_robust_timedlock (mutex->__data.__lock, abstime, id,
                                         PTHREAD_ROBUST_MUTEX_PSHARED (mutex));

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_unlock (mutex->__data.__lock,
                          PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }

          if (result == ETIMEDOUT || result == EINVAL)
            goto out;

          oldval = result;
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
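      /* Priority-inheritance mutex: the lock word holds the owner's TID,
         so an uncontended acquire is a single compare-and-swap.  On
         contention the FUTEX_LOCK_PI operation lets the kernel block us
         and boost the current owner's priority.  */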
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      id, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  FUTEX_LOCK_PI interprets the timeout as an
               absolute CLOCK_REALTIME value, so ABSTIME is passed on
               unchanged.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);

            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_LOCK_PI,
                                                          private), 1,
                                      abstime);
            if (INTERNAL_SYSCALL_ERROR_P (e, __err))
              {
                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ETIMEDOUT)
                  return ETIMEDOUT;

                if (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK)
                  {
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                            || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                                && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                    /* ESRCH can happen only for non-robust PI mutexes where
                       the owner of the lock died.  */
                    assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH
                            || !robust);

                    /* Delay the thread until the timeout is reached.
                       Then return ETIMEDOUT.  */
                    struct timespec reltime;
                    struct timespec now;

                    INTERNAL_SYSCALL (clock_gettime, __err, 2, CLOCK_REALTIME,
                                      &now);
                    reltime.tv_sec = abstime->tv_sec - now.tv_sec;
                    reltime.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                    if (reltime.tv_nsec < 0)
                      {
                        reltime.tv_nsec += 1000000000;
                        --reltime.tv_sec;
                      }
                    if (reltime.tv_sec >= 0)
                      while (nanosleep_not_cancel (&reltime, &reltime) != 0)
                        continue;

                    return ETIMEDOUT;
                  }

                return INTERNAL_SYSCALL_ERRNO (e, __err);
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
        }
      break;

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
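      /* Priority-protection (ceiling) mutex: the ceiling is stored in the
         upper bits of the lock word (PTHREAD_MUTEX_PRIO_CEILING_MASK) and
         the low bits hold the lock state (0 free, 1 locked, 2 locked with
         waiters).  The locker first raises its own priority to the ceiling
         via __pthread_tpp_change_priority.  */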
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

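            /* POSIX: a thread whose priority is above the ceiling may not
               acquire a PTHREAD_PRIO_PROTECT mutex; fail with EINVAL.  */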
            if (__pthread_current_priority () > ceiling)
              {
                result = EINVAL;
              failpp:
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return result;
              }

            result = __pthread_tpp_change_priority (oldprio, ceiling);
            if (result)
              return result;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  {
                    /* Reject invalid timeouts.  */
                    if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
                      {
                        result = EINVAL;
                        goto failpp;
                      }

                    struct timeval tv;
                    struct timespec rt;

                    /* Get the current time.  */
                    (void) __gettimeofday (&tv, NULL);

                    /* Compute relative timeout.  */
                    rt.tv_sec = abstime->tv_sec - tv.tv_sec;
                    rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
                    if (rt.tv_nsec < 0)
                      {
                        rt.tv_nsec += 1000000000;
                        --rt.tv_sec;
                      }

                    /* Already timed out?  */
                    if (rt.tv_sec < 0)
                      {
                        result = ETIMEDOUT;
                        goto failpp;
                      }

                    lll_futex_timed_wait (&mutex->__data.__lock,
                                          ceilval | 2, &rt,
                                          PTHREAD_MUTEX_PSHARED (mutex));
                  }
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  if (result == 0)
    {
      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      LIBC_PROBE (mutex_timedlock_acquired, 1, mutex);
    }

 out:
  return result;
}