Revert "sysdeps/ieee754/ldbl-128ibm/e_expl.c"
nptl/pthread_mutex_trylock.c
/* Copyright (C) 2002-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

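/* Try to acquire MUTEX without ever blocking.  Returns 0 on success,
   EBUSY if the mutex is already locked, and otherwise an error code
   determined by the mutex type: EAGAIN, EDEADLK, EOWNERDEAD,
   ENOTRECOVERABLE or EINVAL.  */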
int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

    case PTHREAD_MUTEX_ERRORCHECK_NP:
    case PTHREAD_MUTEX_TIMED_NP:
    case PTHREAD_MUTEX_ADAPTIVE_NP:
      /* Normal mutex.  */
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

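      /* Robust mutexes.  Before the lock word is modified the mutex is
         recorded in the thread's robust_head.list_op_pending slot so the
         kernel can finish the operation if this thread dies in the
         middle of it.  A set FUTEX_OWNER_DIED bit in the lock word means
         the previous owner died while holding the mutex.  */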
    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = lll_robust_trylock (mutex->__data.__lock, id);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

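          /* A robust mutex whose __owner is PTHREAD_MUTEX_NOTRECOVERABLE
             was unlocked by a previous EOWNERDEAD owner without first
             being marked consistent; every further lock attempt fails
             with ENOTRECOVERABLE.  */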
          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

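      /* Priority-inheritance mutexes.  The lock word holds the owner's
         TID; an uncontended acquisition is a single CAS from 0 to our
         TID, while the owner-died case is handed to the kernel via the
         FUTEX_TRYLOCK_PI operation.  */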
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

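        /* If FUTEX_OWNER_DIED is still set we now own the lock but the
           previous owner died; report the state as EOWNERDEAD so the
           caller can make the mutex consistent again.  */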
        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }

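      /* Priority-protection (ceiling) mutexes.  The priority ceiling is
         stored in the upper bits of the lock word; the acquiring thread
         first raises its own priority to that ceiling and only then
         attempts to take the lock.  */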
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

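        /* Read the ceiling from the lock word, switch to that priority
           with __pthread_tpp_change_priority and then try to CAS the
           locked value into place; retry if the stored ceiling changed
           in the meantime.  */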
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

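  /* Reached only via the 'break' statements above, i.e. when the lock
     could not be acquired without blocking.  */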
  return EBUSY;
}
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)