/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include "pthreadP.h"
#include <lowlevellock.h>

#ifndef lll_trylock_elision
#define lll_trylock_elision(a,t) lll_trylock(a)
#endif

#ifndef FORCE_ELISION
#define FORCE_ELISION(m, s)
#endif
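/* lll_trylock_elision and FORCE_ELISION are hooks that architectures
   with lock elision support override; the fallbacks above reduce an
   elided trylock to a plain lll_trylock and make FORCE_ELISION a
   no-op, so generic builds compile without elision support.  */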

int
__pthread_mutex_trylock (pthread_mutex_t *mutex)
{
  int oldval;
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  switch (__builtin_expect (PTHREAD_MUTEX_TYPE_ELISION (mutex),
                            PTHREAD_MUTEX_TIMED_NP))
    {
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP|PTHREAD_MUTEX_ELISION_NP:
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;
          return 0;
        }

      if (lll_trylock (mutex->__data.__lock) == 0)
        {
          /* Record the ownership.  */
          mutex->__data.__owner = id;
          mutex->__data.__count = 1;
          ++mutex->__data.__nusers;
          return 0;
        }
      break;

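    /* Elided trylock: on architectures that implement lock elision,
       lll_trylock_elision may begin a hardware transaction instead of
       acquiring the lock word, so ownership is deliberately not
       recorded below; the matching unlock commits the transaction or
       releases the lock.  */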
    case PTHREAD_MUTEX_TIMED_ELISION_NP:
    elision: __attribute__((unused))
      if (lll_trylock_elision (mutex->__data.__lock,
                               mutex->__data.__elision) != 0)
        break;
      /* Don't record the ownership.  */
      return 0;

    case PTHREAD_MUTEX_TIMED_NP:
      FORCE_ELISION (mutex, goto elision);
      /* FALL THROUGH */
    case PTHREAD_MUTEX_ADAPTIVE_NP:
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      if (lll_trylock (mutex->__data.__lock) != 0)
        break;

      /* Record the ownership.  */
      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;

      return 0;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
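      /* Robust mutexes follow the robust-futex protocol: publish the
         mutex in list_op_pending before touching the lock word so that
         the kernel and other threads can recover the mutex if this
         thread dies mid-operation; the entry is cleared again once the
         mutex is safely enqueued or the operation is abandoned.  */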
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id | (oldval & FUTEX_WAITERS);

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fell
                 through to the end of the function, __nusers would be
                 incremented, which is not correct because the old
                 owner has to be discounted.  */
              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
            {
              int kind = PTHREAD_MUTEX_TYPE (mutex);
              if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

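          /* Try to acquire the lock outright: CAS the lock word from
             0 to our TID.  Failure against a live owner means EBUSY;
             failure with FUTEX_OWNER_DIED set loops back to the
             dead-owner path above.  */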
          oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        id, 0);
          if (oldval != 0 && (oldval & FUTEX_OWNER_DIED) == 0)
            {
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              return EBUSY;
            }

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              if (oldval == id)
                lll_unlock (mutex->__data.__lock,
                            PTHREAD_ROBUST_MUTEX_PSHARED (mutex));
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

      mutex->__data.__owner = id;
      ++mutex->__data.__nusers;
      mutex->__data.__count = 1;

      return 0;

    /* The PI support requires the Linux futex system call.  If that's not
       available, pthread_mutex_init should never have allowed the type to
       be set.  So it will get the default case for an invalid type.  */
#ifdef __NR_futex
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
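        /* PI mutexes keep the owner's TID in the lock word.  An
           uncontended acquisition is a CAS from 0 to our TID; a lock
           held by a live owner fails with EBUSY below, and a dead
           owner's lock is handed to the kernel via FUTEX_TRYLOCK_PI,
           which resolves the robust/PI handover.  */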
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        oldval
          = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                 id, 0);

        if (oldval != 0)
          {
            if ((oldval & FUTEX_OWNER_DIED) == 0)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            assert (robust);

            /* The mutex owner died.  The kernel will now take care of
               everything.  */
            int private = (robust
                           ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex)
                           : PTHREAD_MUTEX_PSHARED (mutex));
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      __lll_private_flag (FUTEX_TRYLOCK_PI,
                                                          private), 0, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && INTERNAL_SYSCALL_ERRNO (e, __err) == EWOULDBLOCK)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                return EBUSY;
              }

            oldval = mutex->__data.__lock;
          }

        if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fell
               through to the end of the function, __nusers would be
               incremented, which is not correct because the old owner
               has to be discounted.  */
            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              __lll_private_flag (FUTEX_UNLOCK_PI,
                                                  PTHREAD_ROBUST_MUTEX_PSHARED (mutex)),
                              0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }

        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
#endif  /* __NR_futex.  */

    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__glibc_unlikely (mutex->__data.__count + 1 == 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

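        /* Priority-protect (TPP) protocol: the lock word carries the
           priority ceiling in PTHREAD_MUTEX_PRIO_CEILING_MASK and uses
           the low bit as the lock flag.  Raise this thread's priority
           to the ceiling first, then try to CAS the lock flag in,
           retrying if the ceiling changes underneath us.  */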
        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            int retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                     ceilval | 1, ceilval);

            if (oldval == ceilval)
              break;
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        if (oldval != ceilval)
          {
            __pthread_tpp_change_priority (oldprio, -1);
            break;
          }

        assert (mutex->__data.__owner == 0);
        /* Record the ownership.  */
        mutex->__data.__owner = id;
        ++mutex->__data.__nusers;
        mutex->__data.__count = 1;

        return 0;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  return EBUSY;
}

#ifndef __pthread_mutex_trylock
#ifndef pthread_mutex_trylock
strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
#endif
#endif
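
/* Illustrative usage sketch (not part of glibc): a caller polls the
   lock and does other work instead of blocking.  EBUSY means the lock
   is currently held; for robust mutexes, EOWNERDEAD means the lock was
   acquired but its previous owner died, and the new owner must restore
   the protected state and call pthread_mutex_consistent.

     #include <errno.h>
     #include <pthread.h>

     static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

     void
     poll_lock (void)
     {
       int e = pthread_mutex_trylock (&m);
       if (e == 0)
         {
           // ... critical section ...
           pthread_mutex_unlock (&m);
         }
       else if (e == EBUSY)
         {
           // Lock held elsewhere; do other work and retry later.
         }
     }
*/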