/* Copyright (C) 2002-2007,2008,2009,2010,2011 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include "pthreadP.h"
#include <hp-timing.h>
#include <libc-internal.h>
#include <resolv.h>
#include <kernel-features.h>
#include <shlib-compat.h>
#include <stap-probe.h>
/* Local function to start thread and handle cleanup.  */
static int start_thread (void *arg);
/* Nonzero if debugging mode is enabled.  */
int __pthread_debug;
/* Globally enabled events.  */
static td_thr_events_t __nptl_threads_events __attribute_used__;

/* Pointer to descriptor with the last event.  */
static struct pthread *__nptl_last_event __attribute_used__;

/* Number of threads running.  */
unsigned int __nptl_nthreads = 1;
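/* The count starts at one for the initial thread, is incremented for every
   thread that gets created, and is decremented in start_thread; the last
   thread to exit terminates the whole process.  */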
/* Code to allocate and deallocate a stack.  */
#include "allocatestack.c"

/* Code to create the thread.  */
#include <createthread.c>
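/* Both files are included textually rather than compiled separately so
   that the static stack-cache state and helpers they define
   (stack_cache_lock, stack_used, allocate_stack, create_thread) are
   visible to, and can be inlined into, the functions in this file.  */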
/* Find the thread descriptor PD on one of the stack lists, if present.  */
struct pthread *
__find_in_stack_list (pd)
     struct pthread *pd;
{
  list_t *entry;
  struct pthread *result = NULL;

  lll_lock (stack_cache_lock, LLL_PRIVATE);

  list_for_each (entry, &stack_used)
    if (list_entry (entry, struct pthread, list) == pd)
      result = pd;

  if (result == NULL)
    list_for_each (entry, &__stack_user)
      if (list_entry (entry, struct pthread, list) == pd)
        result = pd;

  lll_unlock (stack_cache_lock, LLL_PRIVATE);
  return result;
}
/* Deallocate POSIX thread-local-storage.  */
void
attribute_hidden
__nptl_deallocate_tsd (void)
{
  struct pthread *self = THREAD_SELF;

  /* Maybe no data was ever allocated.  This happens often so we have
     a flag for this.  */
  if (THREAD_GETMEM (self, specific_used))
    {
      size_t round = 0;
      size_t cnt, idx;

      do
        {
          /* So far no new nonzero data entry.  */
          THREAD_SETMEM (self, specific_used, false);

          for (cnt = idx = 0; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
            {
              struct pthread_key_data *level2;

              level2 = THREAD_GETMEM_NC (self, specific, cnt);
              if (level2 == NULL)
                {
                  idx += PTHREAD_KEY_2NDLEVEL_SIZE;
                  continue;
                }

              size_t inner;
              for (inner = 0; inner < PTHREAD_KEY_2NDLEVEL_SIZE;
                   ++inner, ++idx)
                {
                  void *data = level2[inner].data;
                  if (data == NULL)
                    continue;

                  /* Always clear the data.  */
                  level2[inner].data = NULL;

                  /* Make sure the data corresponds to a valid
                     key.  This test fails if the key was
                     deallocated and also if it was
                     re-allocated.  It is the user's
                     responsibility to free the memory in this
                     case.  */
                  if (level2[inner].seq
                      == __pthread_keys[idx].seq
                      /* It is not necessary to register a destructor
                         function.  */
                      && __pthread_keys[idx].destr != NULL)
                    /* Call the user-provided destructor.  */
                    __pthread_keys[idx].destr (data);
                }
            }

          if (THREAD_GETMEM (self, specific_used) == 0)
            /* No data has been modified.  */
            break;
        }
      /* We only repeat the process a fixed number of times.  */
      while (__builtin_expect (++round < PTHREAD_DESTRUCTOR_ITERATIONS, 0));

      /* Just clear the memory of the first block for reuse.  */
      memset (&THREAD_SELF->specific_1stblock, '\0',
              sizeof (self->specific_1stblock));

      /* Free the memory for the other blocks.  */
      for (cnt = 1; cnt < PTHREAD_KEY_1STLEVEL_SIZE; ++cnt)
        {
          struct pthread_key_data *level2;

          level2 = THREAD_GETMEM_NC (self, specific, cnt);
          if (level2 != NULL)
            {
              /* The first block is allocated as part of the thread
                 descriptor.  */
              free (level2);
              THREAD_SETMEM_NC (self, specific, cnt, NULL);
            }
        }

      THREAD_SETMEM (self, specific_used, false);
    }
}
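/* POSIX allows an implementation to give up after
   PTHREAD_DESTRUCTOR_ITERATIONS passes even if destructors keep storing
   new non-NULL values; the bounded do/while above is exactly that
   provision.  */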
/* Deallocate a thread's stack after optionally making sure the thread
   descriptor is still valid.  */
void
__free_tcb (struct pthread *pd)
{
  /* The thread is exiting now.  */
  if (__builtin_expect (atomic_bit_test_set (&pd->cancelhandling,
                                             TERMINATED_BIT) == 0, 1))
    {
      /* Remove the descriptor from the list.  */
      if (DEBUGGING_P && __find_in_stack_list (pd) == NULL)
        /* Something is really wrong.  The descriptor for a still
           running thread is gone.  */
        abort ();

      /* Free TPP data.  */
      if (__builtin_expect (pd->tpp != NULL, 0))
        {
          struct priority_protection_data *tpp = pd->tpp;

          pd->tpp = NULL;
          free (tpp);
        }

      /* Queue the stack memory block for reuse and exit the process.  The
         kernel will signal via writing to the address returned by
         QUEUE-STACK when the stack is available.  */
      __deallocate_stack (pd);
    }
}
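/* Because the cleanup above runs only for the caller that actually sets
   TERMINATED_BIT, the exiting thread and a joining or detaching thread can
   both call this function without the descriptor being freed twice.  */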
static int
start_thread (void *arg)
{
  struct pthread *pd = (struct pthread *) arg;

#if HP_TIMING_AVAIL
  /* Remember the time when the thread was started.  */
  hp_timing_t now;
  HP_TIMING_NOW (now);
  THREAD_SETMEM (pd, cpuclock_offset, now);
#endif

  /* Initialize resolver state pointer.  */
  __resp = &pd->res;

  /* Initialize pointers to locale data.  */
  __ctype_init ();

  /* Allow setxid from now onwards.  */
  if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0) == -2, 0))
    lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);
#ifdef __NR_set_robust_list
# ifndef __ASSUME_SET_ROBUST_LIST
  if (__set_robust_list_avail >= 0)
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
      /* This call should never fail because the initial call in init.c
         succeeded.  */
      INTERNAL_SYSCALL (set_robust_list, err, 2, &pd->robust_head,
                        sizeof (struct robust_list_head));
    }
#endif
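  /* Registering &pd->robust_head gives the kernel the location of this
     thread's robust-mutex list; when the thread dies the kernel walks the
     list and marks each listed futex with FUTEX_OWNER_DIED so that waiters
     can recover the mutex.  */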
  /* If the parent was running cancellation handlers while creating
     the thread the new thread inherited the signal mask.  Reset the
     cancellation signal mask.  */
  if (__builtin_expect (pd->parent_cancelhandling & CANCELING_BITMASK, 0))
    {
      INTERNAL_SYSCALL_DECL (err);
      sigset_t mask;
      __sigemptyset (&mask);
      __sigaddset (&mask, SIGCANCEL);
      (void) INTERNAL_SYSCALL (rt_sigprocmask, err, 4, SIG_UNBLOCK, &mask,
                               NULL, _NSIG / 8);
    }
  /* This is where the try/finally block should be created.  For
     compilers without that support we do use setjmp.  */
  struct pthread_unwind_buf unwind_buf;

  /* No previous handlers.  */
  unwind_buf.priv.data.prev = NULL;
  unwind_buf.priv.data.cleanup = NULL;

  int not_first_call;
  not_first_call = setjmp ((struct __jmp_buf_tag *) unwind_buf.cancel_jmp_buf);
  if (__builtin_expect (! not_first_call, 1))
    {
      /* Store the new cleanup handler info.  */
      THREAD_SETMEM (pd, cleanup_jmp_buf, &unwind_buf);

      if (__builtin_expect (pd->stopped_start, 0))
        {
          int oldtype = CANCEL_ASYNC ();

          /* Get the lock the parent locked to force synchronization.  */
          lll_lock (pd->lock, LLL_PRIVATE);
          /* And give it up right away.  */
          lll_unlock (pd->lock, LLL_PRIVATE);

          CANCEL_RESET (oldtype);
        }
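      /* The parent keeps PD->lock held while it still has work to do on
         the new thread (for instance applying scheduling parameters or
         reporting the creation event), so blocking on the lock here simply
         delays the user code until that setup has finished.  */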
      LIBC_PROBE (pthread_start, 3, (pthread_t) pd, pd->start_routine, pd->arg);

      /* Run the code the user provided.  */
#ifdef CALL_THREAD_FCT
      THREAD_SETMEM (pd, result, CALL_THREAD_FCT (pd));
#else
      THREAD_SETMEM (pd, result, pd->start_routine (pd->arg));
#endif
    }
  /* Run the destructor for the thread-local data.  */
  __nptl_deallocate_tsd ();

  /* Clean up any state libc stored in thread-local variables.  */
  __libc_thread_freeres ();

  /* If this is the last thread we terminate the process now.  We
     do not notify the debugger, it might just irritate it if there
     is no thread left.  */
  if (__builtin_expect (atomic_decrement_and_test (&__nptl_nthreads), 0))
    /* This was the last thread.  */
    exit (0);
  /* Report the death of the thread if this is wanted.  */
  if (__builtin_expect (pd->report_events, 0))
    {
      /* See whether TD_DEATH is in any of the masks.  */
      const int idx = __td_eventword (TD_DEATH);
      const uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__nptl_threads_events.event_bits[idx]
                   | pd->eventbuf.eventmask.event_bits[idx])) != 0)
        {
          /* Yep, we have to signal the death.  Add the descriptor to
             the list but only if it is not already on it.  */
          if (pd->nextevent == NULL)
            {
              pd->eventbuf.eventnum = TD_DEATH;
              pd->eventbuf.eventdata = pd;

              do
                pd->nextevent = __nptl_last_event;
              while (atomic_compare_and_exchange_bool_acq (&__nptl_last_event,
                                                            pd, pd->nextevent));
            }

          /* Now call the function to signal the event.  */
          __nptl_death_event ();
        }
    }
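  /* __nptl_death_event is an empty function; a debugger using libthread_db
     sets a breakpoint on it and, when it is hit, reads the queued
     descriptors starting at __nptl_last_event.  */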
  /* The thread is exiting now.  Don't set this bit until after we've hit
     the event-reporting breakpoint, so that td_thr_get_info on us while at
     the breakpoint reports TD_THR_RUN state rather than TD_THR_ZOMBIE.  */
  atomic_bit_set (&pd->cancelhandling, EXITING_BIT);
#ifndef __ASSUME_SET_ROBUST_LIST
  /* If this thread has any robust mutexes locked, handle them now.  */
# ifdef __PTHREAD_MUTEX_HAVE_PREV
  void *robust = pd->robust_head.list;
# else
  __pthread_slist_t *robust = pd->robust_list.__next;
# endif
  /* We let the kernel do the notification if it is able to do so.
     If we have to do it here, there are certainly no PI mutexes involved,
     since kernel support for those is even more recent.  */
  if (__set_robust_list_avail < 0
      && __builtin_expect (robust != (void *) &pd->robust_head, 0))
    {
      do
        {
          struct __pthread_mutex_s *this = (struct __pthread_mutex_s *)
            ((char *) robust - offsetof (struct __pthread_mutex_s,
                                         __list.__next));
          robust = *((void **) robust);

# ifdef __PTHREAD_MUTEX_HAVE_PREV
          this->__list.__prev = NULL;
# endif
          this->__list.__next = NULL;

          lll_robust_dead (this->__lock, /* XYZ */ LLL_SHARED);
        }
      while (robust != (void *) &pd->robust_head);
    }
#endif
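  /* This fallback runs only when the kernel does not support
     set_robust_list: the robust mutexes this thread still owns are walked
     by hand and each is marked dead via lll_robust_dead, so a waiter
     acquires it with EOWNERDEAD instead of blocking forever.  */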
  /* Mark the memory of the stack as usable to the kernel.  We free
     everything except for the space used for the TCB itself.  */
  size_t pagesize_m1 = __getpagesize () - 1;
#ifdef _STACK_GROWS_DOWN
  char *sp = CURRENT_STACK_FRAME;
  size_t freesize = (sp - (char *) pd->stackblock) & ~pagesize_m1;
#endif
  assert (freesize < pd->stackblock_size);
  if (freesize > PTHREAD_STACK_MIN)
    madvise (pd->stackblock, freesize - PTHREAD_STACK_MIN, MADV_DONTNEED);
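  /* MADV_DONTNEED lets the kernel reclaim the pages well below the current
     stack frame (PTHREAD_STACK_MIN of headroom is kept) even though the
     stack block stays mapped and may be cached for reuse by a later
     thread.  */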
  /* If the thread is detached free the TCB.  */
  if (IS_DETACHED (pd))
    /* Free the TCB.  */
    __free_tcb (pd);
  else if (__builtin_expect (pd->cancelhandling & SETXID_BITMASK, 0))
    {
      /* Some other thread might call any of the setXid functions and
         expect us to reply.  In this case wait until we have done so.  */
      do
        lll_futex_wait (&pd->setxid_futex, 0, LLL_PRIVATE);
      while (pd->cancelhandling & SETXID_BITMASK);

      /* Reset the value so that the stack can be reused.  */
      pd->setxid_futex = 0;
    }
  /* We cannot call '_exit' here.  '_exit' will terminate the process.

     The 'exit' implementation in the kernel will signal when the
     process is really dead since 'clone' got passed the CLONE_CLEARTID
     flag.  The 'tid' field in the TCB will be set to zero.

     The exit code is zero since in case all threads exit by calling
     'pthread_exit' the exit status must be 0 (zero).  */
  __exit_thread_inline (0);

  /* NOTREACHED */
  return 0;
}
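/* pthread_join relies on the CLONE_CLEARTID behaviour described above: it
   futex-waits on PD->tid, and the kernel's clearing of and wake on that
   word is what finally releases the joiner.  */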
/* Default thread attributes for the case when the user does not
   provide any.  */
static const struct pthread_attr default_attr =
  {
    /* Just some value > 0 which gets rounded to the nearest page size.  */
    .guardsize = 1,
  };
int
__pthread_create_2_1 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  STACK_VARIABLES;

  const struct pthread_attr *iattr = (struct pthread_attr *) attr;
  if (iattr == NULL)
    /* Is this the best idea?  On NUMA machines this could mean
       accessing far-away memory.  */
    iattr = &default_attr;
  struct pthread *pd = NULL;
  int err = ALLOCATE_STACK (iattr, &pd);
  if (__builtin_expect (err != 0, 0))
    /* Something went wrong.  Maybe a parameter of the attributes is
       invalid or we could not allocate memory.  Note we have to
       translate error codes.  */
    return err == ENOMEM ? EAGAIN : err;
  /* Initialize the TCB.  All initializations with zero should be
     performed in 'get_cached_stack'.  This way we avoid doing this if
     the stack was freshly allocated with 'mmap'.  */

#if TLS_TCB_AT_TP
  /* Reference to the TCB itself.  */
  pd->header.self = pd;

  /* Self-reference for TLS.  */
  pd->header.tcb = pd;
#endif
  /* Store the address of the start routine and the parameter.  Since
     we do not start the function directly the stillborn thread will
     get the information from its thread descriptor.  */
  pd->start_routine = start_routine;
  pd->arg = arg;
  /* Copy the thread attribute flags.  */
  struct pthread *self = THREAD_SELF;
  pd->flags = ((iattr->flags & ~(ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET))
               | (self->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)));
  /* Initialize the field for the ID of the thread which is waiting
     for us.  This is a self-reference in case the thread is created
     detached.  */
  pd->joinid = iattr->flags & ATTR_FLAG_DETACHSTATE ? pd : NULL;
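  /* A self-referencing joinid is what IS_DETACHED tests for: pthread_join
     rejects such a thread and start_thread frees the TCB itself when the
     thread exits.  */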
  /* The debug events are inherited from the parent.  */
  pd->eventbuf = self->eventbuf;
  /* Copy the parent's scheduling parameters.  The flags will say what
     is valid and what is not.  */
  pd->schedpolicy = self->schedpolicy;
  pd->schedparam = self->schedparam;
  /* Copy the stack guard canary.  */
#ifdef THREAD_COPY_STACK_GUARD
  THREAD_COPY_STACK_GUARD (pd);
#endif

  /* Copy the pointer guard value.  */
#ifdef THREAD_COPY_POINTER_GUARD
  THREAD_COPY_POINTER_GUARD (pd);
#endif
  /* Determine scheduling parameters for the thread.  */
  if (attr != NULL
      && __builtin_expect ((iattr->flags & ATTR_FLAG_NOTINHERITSCHED) != 0, 0)
      && (iattr->flags & (ATTR_FLAG_SCHED_SET | ATTR_FLAG_POLICY_SET)) != 0)
    {
      INTERNAL_SYSCALL_DECL (scerr);
      /* Use the scheduling parameters the user provided.  */
      if (iattr->flags & ATTR_FLAG_POLICY_SET)
        pd->schedpolicy = iattr->schedpolicy;
      else if ((pd->flags & ATTR_FLAG_POLICY_SET) == 0)
        {
          pd->schedpolicy = INTERNAL_SYSCALL (sched_getscheduler, scerr, 1, 0);
          pd->flags |= ATTR_FLAG_POLICY_SET;
        }

      if (iattr->flags & ATTR_FLAG_SCHED_SET)
        memcpy (&pd->schedparam, &iattr->schedparam,
                sizeof (struct sched_param));
      else if ((pd->flags & ATTR_FLAG_SCHED_SET) == 0)
        {
          INTERNAL_SYSCALL (sched_getparam, scerr, 2, 0, &pd->schedparam);
          pd->flags |= ATTR_FLAG_SCHED_SET;
        }
      /* Check for valid priorities.  */
      int minprio = INTERNAL_SYSCALL (sched_get_priority_min, scerr, 1,
                                      pd->schedpolicy);
      int maxprio = INTERNAL_SYSCALL (sched_get_priority_max, scerr, 1,
                                      pd->schedpolicy);
      if (pd->schedparam.sched_priority < minprio
          || pd->schedparam.sched_priority > maxprio)
        {
          /* Perhaps a thread wants to change the IDs and is waiting
             for this stillborn thread.  */
          if (__builtin_expect (atomic_exchange_acq (&pd->setxid_futex, 0)
                                == -2, 0))
            lll_futex_wake (&pd->setxid_futex, 1, LLL_PRIVATE);

          __deallocate_stack (pd);
          return EINVAL;
        }
    }
  /* Pass the descriptor to the caller.  */
  *newthread = (pthread_t) pd;

  /* Start the thread.  */
  return create_thread (pd, iattr, STACK_VARIABLES_ARGS);
}
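/* create_thread is defined in the createthread.c included above; it issues
   the clone call and, when it still has to act on the new thread first,
   keeps PD->lock held so that the child blocks in the stopped_start path
   of start_thread.  */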
versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
int
__pthread_create_2_0 (newthread, attr, start_routine, arg)
     pthread_t *newthread;
     const pthread_attr_t *attr;
     void *(*start_routine) (void *);
     void *arg;
{
  /* The ATTR attribute is not really of type `pthread_attr_t *'.  It has
     the old size and access to the new members might crash the program.
     We convert the struct now.  */
  struct pthread_attr new_attr;

  if (attr != NULL)
    {
      struct pthread_attr *iattr = (struct pthread_attr *) attr;
      size_t ps = __getpagesize ();
      /* Copy values from the user-provided attributes.  */
      new_attr.schedparam = iattr->schedparam;
      new_attr.schedpolicy = iattr->schedpolicy;
      new_attr.flags = iattr->flags;

      /* Fill in default values for the fields not present in the old
         implementation.  */
      new_attr.guardsize = ps;
      new_attr.stackaddr = NULL;
      new_attr.stacksize = 0;
      new_attr.cpuset = NULL;

      /* We will pass this value on to the real implementation.  */
      attr = (pthread_attr_t *) &new_attr;
    }
  return __pthread_create_2_1 (newthread, attr, start_routine, arg);
}
compat_symbol (libpthread, __pthread_create_2_0, pthread_create,
               GLIBC_2_0);
#endif
/* Information for libthread_db.  */
#include "../nptl_db/db_info.c"
/* If pthread_create is present, libgcc_eh.a and libsupc++.a expect some
   other POSIX thread functions to be present as well.  */
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_lock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_trylock)
PTHREAD_STATIC_FN_REQUIRE (pthread_mutex_unlock)

PTHREAD_STATIC_FN_REQUIRE (pthread_once)
PTHREAD_STATIC_FN_REQUIRE (pthread_cancel)

PTHREAD_STATIC_FN_REQUIRE (pthread_key_create)
PTHREAD_STATIC_FN_REQUIRE (pthread_key_delete)
PTHREAD_STATIC_FN_REQUIRE (pthread_setspecific)
PTHREAD_STATIC_FN_REQUIRE (pthread_getspecific)
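/* Each PTHREAD_STATIC_FN_REQUIRE above emits a reference that forces the
   corresponding object file to be pulled in from the static libpthread.a
   whenever pthread_create itself is linked.  */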