/* Linuxthreads - a simple clone()-based implementation of Posix */
/* threads for Linux. */
/* Copyright (C) 1996 Xavier Leroy (Xavier.Leroy@inria.fr) */

/* This program is free software; you can redistribute it and/or */
/* modify it under the terms of the GNU Library General Public License */
/* as published by the Free Software Foundation; either version 2 */
/* of the License, or (at your option) any later version. */

/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU Library General Public License for more details. */

/* Thread creation, initialization, and basic low-level routines */

#include <sys/resource.h>
#include <sys/sysctl.h>
#include <shlib-compat.h>

#include "internals.h"

#include <locale.h>   /* for __uselocale */

#if __ASSUME_REALTIME_SIGNALS && !defined __SIGRTMIN
# error "This must not happen; new kernel assumed but old headers"

#if !(USE_TLS && HAVE___THREAD)
/* These variables are used by the setup code. */

/* We need the global/static resolver state here. */
extern struct __res_state _res;

/* We need only a few variables. */
static pthread_descr manager_thread;

/* Descriptor of the initial thread */

struct _pthread_descr_struct __pthread_initial_thread = {
  .self = &__pthread_initial_thread /* pthread_descr self */
  &__pthread_initial_thread, /* pthread_descr p_nextlive */
  &__pthread_initial_thread, /* pthread_descr p_prevlive */
  NULL, /* pthread_descr p_nextwaiting */
  NULL, /* pthread_descr p_nextlock */
  PTHREAD_THREADS_MAX, /* pthread_t p_tid */
  0, /* int p_priority */
  &__pthread_handles[0].h_lock, /* struct _pthread_fastlock * p_lock */
  NULL, /* sigjmp_buf * p_signal_buf */
  NULL, /* sigjmp_buf * p_cancel_buf */
  0, /* char p_terminated */
  0, /* char p_detached */
  0, /* char p_exited */
  NULL, /* void * p_retval */
  NULL, /* pthread_descr p_joining */
  NULL, /* struct _pthread_cleanup_buffer * p_cleanup */
  0, /* char p_cancelstate */
  0, /* char p_canceltype */
  0, /* char p_canceled */
  NULL, /* char * p_in_sighandler */
  0, /* char p_sigwaiting */
  PTHREAD_START_ARGS_INITIALIZER(NULL),
  /* struct pthread_start_args p_start_args */
  {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
  {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
  &_errno, /* int *p_errnop */
  &_h_errno, /* int *p_h_errnop */
  0, /* int p_h_errno */
  &_res, /* struct __res_state *p_resp */
  {}, /* struct __res_state p_res */
  1, /* int p_userstack */
  NULL, /* void * p_guardaddr */
  0, /* size_t p_guardsize */
  0, /* Always index 0 */
  0, /* int p_report_events */
  {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
  __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
  0, /* char p_woken_by_cancel */
  0, /* char p_condvar_avail */
  0, /* char p_sem_avail */
  NULL, /* struct pthread_extricate_if *p_extricate */
  NULL, /* pthread_readlock_info *p_readlock_list; */
  NULL, /* pthread_readlock_info *p_readlock_free; */
  0 /* int p_untracked_readlock_count; */
/* Descriptor of the manager thread; nothing in it is used except the error
   variables, the p_pid and p_priority fields,
   and the address itself, which serves for identification. */
#define manager_thread (&__pthread_manager_thread)
struct _pthread_descr_struct __pthread_manager_thread = {
  .self = &__pthread_manager_thread /* pthread_descr self */
  NULL, /* pthread_descr p_nextlive */
  NULL, /* pthread_descr p_prevlive */
  NULL, /* pthread_descr p_nextwaiting */
  NULL, /* pthread_descr p_nextlock */
  0, /* int p_priority */
  &__pthread_handles[1].h_lock, /* struct _pthread_fastlock * p_lock */
  0, /* int p_signal */
  NULL, /* sigjmp_buf * p_signal_buf */
  NULL, /* sigjmp_buf * p_cancel_buf */
  0, /* char p_terminated */
  0, /* char p_detached */
  0, /* char p_exited */
  NULL, /* void * p_retval */
  0, /* int p_retcode */
  NULL, /* pthread_descr p_joining */
  NULL, /* struct _pthread_cleanup_buffer * p_cleanup */
  0, /* char p_cancelstate */
  0, /* char p_canceltype */
  0, /* char p_canceled */
  NULL, /* char * p_in_sighandler */
  0, /* char p_sigwaiting */
  PTHREAD_START_ARGS_INITIALIZER(__pthread_manager),
  /* struct pthread_start_args p_start_args */
  {NULL}, /* void ** p_specific[PTHREAD_KEY_1STLEVEL_SIZE] */
  {NULL}, /* void * p_libc_specific[_LIBC_TSD_KEY_N] */
  &__pthread_manager_thread.p_errno, /* int *p_errnop */
  NULL, /* int *p_h_errnop */
  0, /* int p_h_errno */
  NULL, /* struct __res_state *p_resp */
  {}, /* struct __res_state p_res */
  0, /* int p_userstack */
  NULL, /* void * p_guardaddr */
  0, /* size_t p_guardsize */
  1, /* Always index 1 */
  0, /* int p_report_events */
  {{{0, }}, 0, NULL}, /* td_eventbuf_t p_eventbuf */
  __ATOMIC_INITIALIZER, /* struct pthread_atomic p_resume_count */
  0, /* char p_woken_by_cancel */
  0, /* char p_condvar_avail */
  0, /* char p_sem_avail */
  NULL, /* struct pthread_extricate_if *p_extricate */
  NULL, /* pthread_readlock_info *p_readlock_list; */
  NULL, /* pthread_readlock_info *p_readlock_free; */
  0 /* int p_untracked_readlock_count; */

/* Pointer to the main thread (the father of the thread manager thread) */
/* Originally, this is the initial thread, but this changes after fork() */

pthread_descr __pthread_main_thread;
pthread_descr __pthread_main_thread = &__pthread_initial_thread;

/* Limit between the stack of the initial thread (above) and the
   stacks of other threads (below). Aligned on a STACK_SIZE boundary. */

char *__pthread_initial_thread_bos;

/* File descriptor for sending requests to the thread manager. */
/* Initially -1, meaning that the thread manager is not running. */

int __pthread_manager_request = -1;

/* Other end of the pipe for sending requests to the thread manager. */

int __pthread_manager_reader;

/* Limits of the thread manager stack */

char *__pthread_manager_thread_bos;
char *__pthread_manager_thread_tos;

/* For process-wide exit() */

int __pthread_exit_requested;
int __pthread_exit_code;

/* Maximum stack size. */
size_t __pthread_max_stacksize;
/* Nonzero if the machine has more than one processor. */
int __pthread_smp_kernel;

#if !__ASSUME_REALTIME_SIGNALS
/* Pointers that select new or old suspend/resume functions
   based on availability of rt signals. */

void (*__pthread_restart)(pthread_descr) = __pthread_restart_old;
void (*__pthread_suspend)(pthread_descr) = __pthread_suspend_old;
int (*__pthread_timedsuspend)(pthread_descr, const struct timespec *) = __pthread_timedsuspend_old;
#endif /* __ASSUME_REALTIME_SIGNALS */

/* Communicate relevant LinuxThreads constants to gdb */

const int __pthread_threads_max = PTHREAD_THREADS_MAX;
const int __pthread_sizeof_handle = sizeof(struct pthread_handle_struct);
const int __pthread_offsetof_descr = offsetof(struct pthread_handle_struct,
const int __pthread_offsetof_pid = offsetof(struct _pthread_descr_struct,
const int __linuxthreads_pthread_sizeof_descr
  = sizeof(struct _pthread_descr_struct);

const int __linuxthreads_initial_report_events;

const char __linuxthreads_version[] = VERSION;

/* Forward declarations */

static void pthread_onexit_process(int retcode, void *arg);
#ifndef HAVE_Z_NODELETE
static void pthread_atexit_process(void *arg, int retcode);
static void pthread_atexit_retcode(void *arg, int retcode);
static void pthread_handle_sigcancel(int sig);
static void pthread_handle_sigrestart(int sig);
static void pthread_handle_sigdebug(int sig);
/* Signal numbers used for the communication.
   In these variables we keep track of the signals actually used. If the
   platform does not support any real-time signals we will define the
   values to some unreasonable value which will signal failing of all
   the functions below. */
static int current_rtmin = -1;
static int current_rtmax = -1;
int __pthread_sig_restart = SIGUSR1;
int __pthread_sig_cancel = SIGUSR2;
int __pthread_sig_debug;

static int current_rtmin;
static int current_rtmax;

#if __SIGRTMAX - __SIGRTMIN >= 3
int __pthread_sig_restart = __SIGRTMIN;
int __pthread_sig_cancel = __SIGRTMIN + 1;
int __pthread_sig_debug = __SIGRTMIN + 2;
int __pthread_sig_restart = SIGUSR1;
int __pthread_sig_cancel = SIGUSR2;
int __pthread_sig_debug;

static int rtsigs_initialized;

#if !__ASSUME_REALTIME_SIGNALS
# include "testrtsig.h"

#if !__ASSUME_REALTIME_SIGNALS
  if (__builtin_expect (!kernel_has_rtsig (), 0))
# if __SIGRTMAX - __SIGRTMIN >= 3
      __pthread_sig_restart = SIGUSR1;
      __pthread_sig_cancel = SIGUSR2;
      __pthread_sig_debug = 0;
#endif /* __ASSUME_REALTIME_SIGNALS */

#if __SIGRTMAX - __SIGRTMIN >= 3
  current_rtmin = __SIGRTMIN + 3;
# if !__ASSUME_REALTIME_SIGNALS
  __pthread_restart = __pthread_restart_new;
  __pthread_suspend = __pthread_wait_for_restart_signal;
  __pthread_timedsuspend = __pthread_timedsuspend_new;
# endif /* __ASSUME_REALTIME_SIGNALS */

  current_rtmin = __SIGRTMIN;

  current_rtmax = __SIGRTMAX;

  rtsigs_initialized = 1;
/* Return the number of the available real-time signal with the highest priority. */
__libc_current_sigrtmin (void)
  if (__builtin_expect (!rtsigs_initialized, 0))
  return current_rtmin;
/* Return the number of the available real-time signal with the lowest priority. */
__libc_current_sigrtmax (void)
  if (__builtin_expect (!rtsigs_initialized, 0))
  return current_rtmax;
/* Allocate a real-time signal with the highest/lowest available
   priority. Note that we don't use a lock since we assume
   this function is called at program start. */
__libc_allocate_rtsig (int high)
  if (__builtin_expect (!rtsigs_initialized, 0))
  if (__builtin_expect (current_rtmin == -1, 0)
      || __builtin_expect (current_rtmin > current_rtmax, 0))
    /* We don't have any more signals available. */
  return high ? current_rtmin++ : current_rtmax--;
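/* Illustrative sketch (not part of the original file): how a libc-internal
   subsystem might reserve a real-time signal through __libc_allocate_rtsig
   above. The `timer_signal' variable and the handler are hypothetical; the
   point is only the calling convention: allocate once, early, and treat -1
   as "no RT signal available". */
#if 0
#include <signal.h>

extern int __libc_allocate_rtsig (int high);

static int timer_signal = -1;   /* hypothetical per-subsystem signal */

static void
timer_sig_handler (int sig)
{
  /* deliver a tick to the subsystem ... */
}

static void
timer_subsystem_init (void)
{
  struct sigaction sa;

  /* Grab the highest-priority free RT signal; -1 means none are left
     (e.g. on a kernel without RT signal support). */
  timer_signal = __libc_allocate_rtsig (1);
  if (timer_signal == -1)
    return;   /* fall back to some other mechanism */

  sa.sa_handler = timer_sig_handler;
  sa.sa_flags = 0;
  sigemptyset (&sa.sa_mask);
  sigaction (timer_signal, &sa, NULL);
}
#endif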
/* The function we use to get the kernel revision. */
extern int __sysctl (int *name, int nlen, void *oldval, size_t *oldlenp,
                     void *newval, size_t newlen);

/* Test whether the machine has more than one processor. This is not the
   best test but good enough. More complicated tests would require `malloc'
   which is not available at that time. */

  static const int sysctl_args[] = { CTL_KERN, KERN_VERSION };
  size_t reslen = sizeof (buf);

  /* Try reading the number using `sysctl' first. */
  if (__sysctl ((int *) sysctl_args,
                sizeof (sysctl_args) / sizeof (sysctl_args[0]),
                buf, &reslen, NULL, 0) < 0)
    /* This was not successful. Now try reading the /proc filesystem. */
    int fd = __open ("/proc/sys/kernel/version", O_RDONLY);
    if (__builtin_expect (fd, 0) == -1
        || (reslen = __read (fd, buf, sizeof (buf))) <= 0)
      /* This also didn't work. We give up and say it's a UP machine. */

  return strstr (buf, "SMP") != NULL;
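/* Illustrative sketch (not part of the original file): once the full C
   library is up, the same question can be answered more directly with
   sysconf; shown only for comparison with the sysctl//proc probing above,
   which has to work before `malloc' is usable. */
#if 0
#include <unistd.h>

static int
is_smp_system_alt (void)
{
  long n = sysconf (_SC_NPROCESSORS_ONLN);
  return n > 1;
}
#endif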
/* Initialize the pthread library.
   Initialization is split in two functions:
   - a constructor function that blocks the __pthread_sig_restart signal
     (must do this very early, since the program could capture the signal
     mask with e.g. sigsetjmp before creating the first thread);
   - a regular function called from pthread_create when needed. */

static void pthread_initialize(void) __attribute__((constructor));

#ifndef HAVE_Z_NODELETE
extern void *__dso_handle __attribute__ ((weak));

#if defined USE_TLS && !defined SHARED
extern void __libc_setup_tls (size_t tcbsize, size_t tcbalign);

/* Do some minimal initialization which has to be done during the
   startup of the C library. */
__pthread_initialize_minimal(void)
  /* First of all init __pthread_handles[0] and [1] if needed. */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;

  /* Unlike in the dynamically linked case the dynamic linker has not
     taken care of initializing the TLS data structures. */
  __libc_setup_tls (TLS_TCB_SIZE, TLS_TCB_ALIGN);
  /* The memory for the thread descriptor was allocated elsewhere as
     part of the TLS allocation. We have to initialize the data
     structure by hand. This initialization must mirror the struct
     definition above. */
  self->p_nextlive = self->p_prevlive = self;
  self->p_tid = PTHREAD_THREADS_MAX;
  self->p_lock = &__pthread_handles[0].h_lock;
# ifndef HAVE___THREAD
  self->p_errnop = &_errno;
  self->p_h_errnop = &_h_errno;
  /* self->p_start_args need not be initialized, it's all zero. */
  self->p_userstack = 1;
# if __LT_SPINLOCK_INIT != 0
  self->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;

  /* Another variable which points to the thread descriptor. */
  __pthread_main_thread = self;
  /* And fill in the pointer to the thread in the __pthread_handles array. */
  __pthread_handles[0].h_descr = self;

  /* First of all init __pthread_handles[0] and [1]. */
# if __LT_SPINLOCK_INIT != 0
  __pthread_handles[0].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[1].h_lock = __LOCK_INITIALIZER;
  __pthread_handles[0].h_descr = &__pthread_initial_thread;
  __pthread_handles[1].h_descr = &__pthread_manager_thread;
  /* If we have special thread_self processing, initialize that for the
     initial thread now. */
# ifdef INIT_THREAD_SELF
  INIT_THREAD_SELF(&__pthread_initial_thread, 0);

  self->p_cpuclock_offset = GL(dl_cpuclock_offset);
  __pthread_initial_thread.p_cpuclock_offset = GL(dl_cpuclock_offset);

#if !(USE_TLS && HAVE___THREAD)
  /* Initialize the thread-local current locale to point to the global one.
     With __thread support, the variable's initializer takes care of this. */
  __uselocale (LC_GLOBAL_LOCALE);

__pthread_init_max_stacksize(void)
  getrlimit(RLIMIT_STACK, &limit);
#ifdef FLOATING_STACKS
  if (limit.rlim_cur == RLIM_INFINITY)
    limit.rlim_cur = ARCH_STACK_MAX_SIZE;
# ifdef NEED_SEPARATE_REGISTER_STACK
  max_stack = limit.rlim_cur / 2;
  max_stack = limit.rlim_cur;

  /* Play with the stack size limit to make sure that no stack ever grows
     beyond STACK_SIZE minus one page (to act as a guard page). */
# ifdef NEED_SEPARATE_REGISTER_STACK
  /* STACK_SIZE bytes hold both the main stack and register backing
     store. The rlimit value applies to each individually. */
  max_stack = STACK_SIZE/2 - __getpagesize ();
  max_stack = STACK_SIZE - __getpagesize();

  if (limit.rlim_cur > max_stack) {
    limit.rlim_cur = max_stack;
    setrlimit(RLIMIT_STACK, &limit);
  __pthread_max_stacksize = max_stack;

static void pthread_initialize(void)
  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#ifdef FLOATING_STACKS
  /* We don't need to know the bottom of the stack. Give the pointer some
     value to signal that initialization happened. */
  __pthread_initial_thread_bos = (void *) -1l;
  /* Determine stack size limits. */
  __pthread_init_max_stacksize ();
# ifdef _STACK_GROWS_UP
  /* The initial thread already has all the stack it needs */
  __pthread_initial_thread_bos = (char *)
    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));

  /* Update the descriptor for the initial thread. */
  THREAD_SETMEM (((pthread_descr) NULL), p_pid, __getpid());
# ifndef HAVE___THREAD
  /* Likewise for the resolver state _res. */
  THREAD_SETMEM (((pthread_descr) NULL), p_resp, &_res);
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = __getpid();
  /* Likewise for the resolver state _res. */
  __pthread_initial_thread.p_resp = &_res;

  /* Initialize real-time signals. */

  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  sa.sa_handler = pthread_handle_sigrestart;
  sigemptyset(&sa.sa_mask);
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
    sa.sa_handler = pthread_handle_sigdebug;
    sigemptyset(&sa.sa_mask);
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_*exit_process. */
#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
  __on_exit (pthread_onexit_process, NULL);
  /* How many processors. */
  __pthread_smp_kernel = is_smp_system ();

void __pthread_initialize(void)
  pthread_initialize();

int __pthread_initialize_manager(void)
  struct pthread_request request;

#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,

  if (__pthread_max_stacksize == 0)
    __pthread_init_max_stacksize ();
  /* If basic initialization has not been done yet (e.g. we're called from a
     constructor run before our constructor), do it now. */
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
  /* Setup stack for thread manager */
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
  if (__pthread_manager_thread_bos == NULL) return -1;
  __pthread_manager_thread_tos =
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
  /* Setup pipe to communicate with thread manager */
  if (pipe(manager_pipe) == -1) {
    free(__pthread_manager_thread_bos);

  /* Allocate memory for the thread descriptor and the dtv. */
  __pthread_handles[1].h_descr = manager_thread = tcb
    = _dl_allocate_tls (NULL);
    free(__pthread_manager_thread_bos);
    __libc_close(manager_pipe[0]);
    __libc_close(manager_pipe[1]);

  /* Initialize the descriptor. */
  tcb->p_header.data.tcb = tcb;
  tcb->p_header.data.self = tcb;
  tcb->p_lock = &__pthread_handles[1].h_lock;
# ifndef HAVE___THREAD
  tcb->p_errnop = &tcb->p_errno;
  tcb->p_start_args = (struct pthread_start_args) PTHREAD_START_ARGS_INITIALIZER(__pthread_manager);
# if __LT_SPINLOCK_INIT != 0
  tcb->p_resume_count = (struct pthread_atomic) __ATOMIC_INITIALIZER;
  tcb = &__pthread_manager_thread;

  __pthread_manager_request = manager_pipe[1]; /* writing end */
  __pthread_manager_reader = manager_pipe[0]; /* reading end */

  /* Start the thread manager */
  if (__linuxthreads_initial_report_events != 0)
    THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
                   __linuxthreads_initial_report_events);
  report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
  if (__linuxthreads_initial_report_events != 0)
    __pthread_initial_thread.p_report_events
      = __linuxthreads_initial_report_events;
  report_events = __pthread_initial_thread.p_report_events;
  if (__builtin_expect (report_events, 0))
    /* It's a bit more complicated. We have to report the creation of
       the manager thread. */
    int idx = __td_eventword (TD_CREATE);
    uint32_t mask = __td_eventmask (TD_CREATE);
    event_bits = THREAD_GETMEM_NC (((pthread_descr) NULL),
                                   p_eventbuf.eventmask.event_bits[idx]);
    event_bits = __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx];
    if ((mask & (__pthread_threads_events.event_bits[idx] | event_bits))
      __pthread_lock(tcb->p_lock, NULL);
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(__pthread_manager_event,
                     (void **) __pthread_manager_thread_bos,
                     THREAD_MANAGER_STACK_SIZE,
                     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
#elif _STACK_GROWS_UP
      pid = __clone(__pthread_manager_event,
                    (void **) __pthread_manager_thread_bos,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
      pid = __clone(__pthread_manager_event,
                    (void **) __pthread_manager_thread_tos,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,

      /* Now fill in the information about the new thread in
         the newly created thread's data structure. We cannot let
         the new thread do this since we don't know whether it was
         already scheduled when we send the event. */
      tcb->p_eventbuf.eventdata = tcb;
      tcb->p_eventbuf.eventnum = TD_CREATE;
      __pthread_last_event = tcb;
      tcb->p_tid = 2 * PTHREAD_THREADS_MAX + 1;

      /* Now call the function which signals the event. */
      __linuxthreads_create_event ();

      /* Now restart the thread. */
      __pthread_unlock(tcb->p_lock);

  if (__builtin_expect (pid, 0) == 0)
#ifdef NEED_SEPARATE_REGISTER_STACK
    pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
                   THREAD_MANAGER_STACK_SIZE,
                   CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
#elif _STACK_GROWS_UP
    pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);
    pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
                  CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND, tcb);

  if (__builtin_expect (pid, 0) == -1) {
    free(__pthread_manager_thread_bos);
    __libc_close(manager_pipe[0]);
    __libc_close(manager_pipe[1]);

  tcb->p_tid = 2 * PTHREAD_THREADS_MAX + 1;

  /* Make gdb aware of new thread manager */
  if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
    raise(__pthread_sig_debug);
    /* We suspend ourselves and gdb will wake us up when it is
       ready to handle us. */
    __pthread_wait_for_restart_signal(thread_self());

  /* Synchronize debugging of the thread manager */
  request.req_kind = REQ_DEBUG;
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                  (char *) &request, sizeof(request)));

/* Thread creation */

int __pthread_create_2_1(pthread_t *thread, const pthread_attr_t *attr,
                         void * (*start_routine)(void *), void *arg)
  pthread_descr self = thread_self();
  struct pthread_request request;

  if (__builtin_expect (__pthread_manager_request, 0) < 0) {
    if (__pthread_initialize_manager() < 0) return EAGAIN;
  request.req_thread = self;
  request.req_kind = REQ_CREATE;
  request.req_args.create.attr = attr;
  request.req_args.create.fn = start_routine;
  request.req_args.create.arg = arg;
  sigprocmask(SIG_SETMASK, (const sigset_t *) NULL,
              &request.req_args.create.mask);
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                  (char *) &request, sizeof(request)));
  retval = THREAD_GETMEM(self, p_retcode);
  if (__builtin_expect (retval, 0) == 0)
    *thread = (pthread_t) THREAD_GETMEM(self, p_retval);

versioned_symbol (libpthread, __pthread_create_2_1, pthread_create, GLIBC_2_1);
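/* Illustrative sketch (not part of the original file): what the request path
   above means for an ordinary caller of the public API. The first
   pthread_create in a process makes __pthread_initialize_manager spawn the
   manager thread; each creation is then a REQ_CREATE message written to the
   manager pipe, and the calling thread sleeps until the manager stores the
   result in p_retcode/p_retval. The program below is hypothetical. */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  printf ("hello from thread %lu\n", (unsigned long) pthread_self ());
  return arg;
}

int
main (void)
{
  pthread_t th;
  void *result;

  /* This call reaches __pthread_create_2_1, which forwards the request to
     the thread manager and waits for the reply. */
  if (pthread_create (&th, NULL, worker, NULL) != 0)
    return 1;
  pthread_join (th, &result);
  return 0;
}
#endif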
#if SHLIB_COMPAT (libpthread, GLIBC_2_0, GLIBC_2_1)

int __pthread_create_2_0(pthread_t *thread, const pthread_attr_t *attr,
                         void * (*start_routine)(void *), void *arg)
  /* The ATTR attribute is not really of type `pthread_attr_t *'. It has
     the old size and access to the new members might crash the program.
     We convert the struct now. */
  pthread_attr_t new_attr;

    size_t ps = __getpagesize ();

    memcpy (&new_attr, attr,
            (size_t) &(((pthread_attr_t*)NULL)->__guardsize));
    new_attr.__guardsize = ps;
    new_attr.__stackaddr_set = 0;
    new_attr.__stackaddr = NULL;
    new_attr.__stacksize = STACK_SIZE - ps;
    attr = &new_attr;
  return __pthread_create_2_1 (thread, attr, start_routine, arg);
compat_symbol (libpthread, __pthread_create_2_0, pthread_create, GLIBC_2_0);

/* Simple operations on thread identifiers */

pthread_t pthread_self(void)
  pthread_descr self = thread_self();
  return THREAD_GETMEM(self, p_tid);

int pthread_equal(pthread_t thread1, pthread_t thread2)
  return thread1 == thread2;

/* Helper function for thread_self in the case of user-provided stacks */

pthread_descr __pthread_find_self(void)
  char * sp = CURRENT_STACK_FRAME;
  /* __pthread_handles[0] is the initial thread, and __pthread_handles[1] is
     the manager thread; both are handled specially in thread_self(), so start at 2 */
  h = __pthread_handles + 2;
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom)) h++;

static pthread_descr thread_self_stack(void)
  char *sp = CURRENT_STACK_FRAME;

  if (sp >= __pthread_manager_thread_bos && sp < __pthread_manager_thread_tos)
    return manager_thread;
  h = __pthread_handles + 2;
  while (h->h_descr == NULL
         || ! (sp <= (char *) h->h_descr->p_stackaddr && sp >= h->h_bottom))
  while (! (sp <= (char *) h->h_descr && sp >= h->h_bottom))

/* Thread scheduling */

int pthread_setschedparam(pthread_t thread, int policy,
                          const struct sched_param *param)
  pthread_handle handle = thread_handle(thread);

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
  th = handle->h_descr;
  if (__builtin_expect (__sched_setscheduler(th->p_pid, policy, param) == -1,
    __pthread_unlock(&handle->h_lock);
  th->p_priority = policy == SCHED_OTHER ? 0 : param->sched_priority;
  __pthread_unlock(&handle->h_lock);
  if (__pthread_manager_request >= 0)
    __pthread_manager_adjust_prio(th->p_priority);

int pthread_getschedparam(pthread_t thread, int *policy,
                          struct sched_param *param)
  pthread_handle handle = thread_handle(thread);

  __pthread_lock(&handle->h_lock, NULL);
  if (__builtin_expect (invalid_handle(handle, thread), 0)) {
    __pthread_unlock(&handle->h_lock);
  pid = handle->h_descr->p_pid;
  __pthread_unlock(&handle->h_lock);
  pol = __sched_getscheduler(pid);
  if (__builtin_expect (pol, 0) == -1) return errno;
  if (__sched_getparam(pid, param) == -1) return errno;

int __pthread_yield (void)
  /* For now this is equivalent to the POSIX call. */
  return sched_yield ();
weak_alias (__pthread_yield, pthread_yield)

/* Process-wide exit() request */

static void pthread_onexit_process(int retcode, void *arg)
  if (__builtin_expect (__pthread_manager_request, 0) >= 0) {
    struct pthread_request request;
    pthread_descr self = thread_self();

    request.req_thread = self;
    request.req_kind = REQ_PROCESS_EXIT;
    request.req_args.exit.code = retcode;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread)
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
      /* Since all threads have been asynchronously terminated
         (possibly holding locks), free cannot be used any more. */
      /*free (__pthread_manager_thread_bos);*/
      __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;

#ifndef HAVE_Z_NODELETE
static int __pthread_atexit_retcode;

static void pthread_atexit_process(void *arg, int retcode)
  pthread_onexit_process (retcode ?: __pthread_atexit_retcode, arg);

static void pthread_atexit_retcode(void *arg, int retcode)
  __pthread_atexit_retcode = retcode;

/* The handler for the RESTART signal just records the signal received
   in the thread descriptor, and optionally performs a siglongjmp
   (for pthread_cond_timedwait). */

static void pthread_handle_sigrestart(int sig)
  pthread_descr self = thread_self();
  THREAD_SETMEM(self, p_signal, sig);
  if (THREAD_GETMEM(self, p_signal_jmp) != NULL)
    siglongjmp(*THREAD_GETMEM(self, p_signal_jmp), 1);

/* The handler for the CANCEL signal checks for cancellation
   (in asynchronous mode), for process-wide exit and exec requests.
   For the thread manager thread, redirect the signal to
   __pthread_manager_sighandler. */

static void pthread_handle_sigcancel(int sig)
  pthread_descr self = thread_self();
  sigjmp_buf * jmpbuf;

  if (self == manager_thread)
    /* A new thread might get a cancel signal before it is fully
       initialized, so that the thread register might still point to the
       manager thread. Double check that this is really the manager
       thread. */
    pthread_descr real_self = thread_self_stack();
    if (real_self == manager_thread)
      __pthread_manager_sighandler(sig);
    /* Oops, thread_self() isn't working yet.. */
# ifdef INIT_THREAD_SELF
    INIT_THREAD_SELF(self, self->p_nr);
    __pthread_manager_sighandler(sig);

  if (__builtin_expect (__pthread_exit_requested, 0)) {
    /* Main thread should accumulate times for thread manager and its
       children, so that timings for main thread account for all threads. */
    if (self == __pthread_main_thread) {
      waitpid(manager_thread->p_pid, NULL, __WCLONE);
      waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
    _exit(__pthread_exit_code);
  if (__builtin_expect (THREAD_GETMEM(self, p_canceled), 0)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    if (THREAD_GETMEM(self, p_canceltype) == PTHREAD_CANCEL_ASYNCHRONOUS)
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    jmpbuf = THREAD_GETMEM(self, p_cancel_jmp);
    if (jmpbuf != NULL) {
      THREAD_SETMEM(self, p_cancel_jmp, NULL);
      siglongjmp(*jmpbuf, 1);
/* Handler for the DEBUG signal.
   The debugging strategy is as follows:
   On reception of a REQ_DEBUG request (sent by new threads created to
   the thread manager under debugging mode), the thread manager throws
   __pthread_sig_debug to itself. The debugger (if active) intercepts
   this signal, takes into account new threads and continues execution
   of the thread manager by propagating the signal, because it doesn't
   know what the signal is specifically meant for. In the current
   implementation, the thread manager simply discards it. */
static void pthread_handle_sigdebug(int sig)
/* Reset the state of the thread machinery after a fork().
   Close the pipe used for requests and set the main thread to the forked
   thread.
   Notice that we can't free the stack segments, as the forked thread
   may hold pointers into them. */
void __pthread_reset_main_thread(void)
  pthread_descr self = thread_self();
  struct rlimit limit;

  if (__pthread_manager_request != -1) {
    /* Free the thread manager stack */
    free(__pthread_manager_thread_bos);
    __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
    /* Close the two ends of the pipe */
    __libc_close(__pthread_manager_request);
    __libc_close(__pthread_manager_reader);
    __pthread_manager_request = __pthread_manager_reader = -1;

  /* Update the pid of the main thread */
  THREAD_SETMEM(self, p_pid, __getpid());
  /* Make the forked thread the main thread */
  __pthread_main_thread = self;
  THREAD_SETMEM(self, p_nextlive, self);
  THREAD_SETMEM(self, p_prevlive, self);
#if !(USE_TLS && HAVE___THREAD)
  /* Now this thread modifies the global variables. */
  THREAD_SETMEM(self, p_errnop, &_errno);
  THREAD_SETMEM(self, p_h_errnop, &_h_errno);
  THREAD_SETMEM(self, p_resp, &_res);

  if (getrlimit (RLIMIT_STACK, &limit) == 0
      && limit.rlim_cur != limit.rlim_max) {
    limit.rlim_cur = limit.rlim_max;
    setrlimit(RLIMIT_STACK, &limit);

/* Process-wide exec() request */

void __pthread_kill_other_threads_np(void)
  struct sigaction sa;
  /* Terminate all other threads and thread manager */
  pthread_onexit_process(0, NULL);
  /* Make current thread the main thread in case the calling thread
     changes its mind, does not exec(), and creates new threads instead. */
  __pthread_reset_main_thread();
  /* Reset the behaviour of the signal handlers for the signals the
     implementation uses, since this would be passed on to the new
     process. */
  sigemptyset(&sa.sa_mask);
  sa.sa_handler = SIG_DFL;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0)
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
weak_alias (__pthread_kill_other_threads_np, pthread_kill_other_threads_np)
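/* Illustrative sketch (not part of the original file): the intended use of
   pthread_kill_other_threads_np is right before an exec*() call, so that the
   new program image does not inherit stray threads. The helper below and the
   command passed to execl are hypothetical. */
#if 0
#define _GNU_SOURCE
#include <pthread.h>
#include <unistd.h>

static void
exec_helper (void)
{
  /* Terminate every other thread (and the manager), then replace the
     process image. If execl fails we are at least left single-threaded. */
  pthread_kill_other_threads_np ();
  execl ("/bin/sh", "sh", "-c", "echo replaced", (char *) NULL);
}
#endif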
/* Concurrency symbol level. */
static int current_level;

int __pthread_setconcurrency(int level)
  /* We don't do anything unless we have found a useful interpretation. */
  current_level = level;
weak_alias (__pthread_setconcurrency, pthread_setconcurrency)

int __pthread_getconcurrency(void)
  return current_level;
weak_alias (__pthread_getconcurrency, pthread_getconcurrency)

/* Primitives for controlling thread execution */

void __pthread_wait_for_restart_signal(pthread_descr self)
  sigprocmask(SIG_SETMASK, NULL, &mask); /* Get current signal mask */
  sigdelset(&mask, __pthread_sig_restart); /* Unblock the restart signal */
  THREAD_SETMEM(self, p_signal, 0);
    sigsuspend(&mask); /* Wait for signal */
  } while (THREAD_GETMEM(self, p_signal) != __pthread_sig_restart);

  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */

#if !__ASSUME_REALTIME_SIGNALS
/* The _old variants are for 2.0 and early 2.1 kernels which don't have RT
   signals.
   On these kernels, we use SIGUSR1 and SIGUSR2 for restart and cancellation.
   Since the restart signal does not queue, we use an atomic counter to create
   queuing semantics. This is needed to resolve a rare race condition in
   pthread_cond_timedwait_relative. */
void __pthread_restart_old(pthread_descr th)
  if (atomic_increment(&th->p_resume_count) == -1)
    kill(th->p_pid, __pthread_sig_restart);

void __pthread_suspend_old(pthread_descr self)
  if (atomic_decrement(&self->p_resume_count) <= 0)
    __pthread_wait_for_restart_signal(self);

__pthread_timedsuspend_old(pthread_descr self, const struct timespec *abstime)
  sigset_t unblock, initial_mask;
  int was_signalled = 0;

  if (atomic_decrement(&self->p_resume_count) == 0) {
    /* Set up a longjmp handler for the restart signal, unblock
       the signal and sleep. */

    if (sigsetjmp(jmpbuf, 1) == 0) {
      THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
      THREAD_SETMEM(self, p_signal, 0);
      /* Unblock the restart signal */
      sigemptyset(&unblock);
      sigaddset(&unblock, __pthread_sig_restart);
      sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

        struct timespec reltime;

        /* Compute a time offset relative to now. */
        __gettimeofday (&now, NULL);
        reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
        reltime.tv_sec = abstime->tv_sec - now.tv_sec;
        if (reltime.tv_nsec < 0) {
          reltime.tv_nsec += 1000000000;
          reltime.tv_sec -= 1;

        /* Sleep for the required duration. If woken by a signal,
           resume waiting as required by Single Unix Specification. */
        if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)

      /* Block the restart signal again */
      sigprocmask(SIG_SETMASK, &initial_mask, NULL);

  THREAD_SETMEM(self, p_signal_jmp, NULL);
  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     we know we have been dequeued and resumed and that the
     resume count is balanced. Otherwise, there are some
     cases to consider. First, try to bump up the resume count
     back to zero. If it goes to 1, it means restart() was
     invoked on this thread. The signal must be consumed
     and the count bumped down and everything is cool. We
     can return a 1 to the caller.
     Otherwise, no restart was delivered yet, so a potential
     race exists; we return a 0 to the caller which must deal
     with this race in an appropriate way; for example by
     atomically removing the thread from consideration for a
     wakeup---if such a thing fails, it means a restart is
     being delivered. */
  if (!was_signalled) {
    if (atomic_increment(&self->p_resume_count) != -1) {
      __pthread_wait_for_restart_signal(self);
      atomic_decrement(&self->p_resume_count); /* should be zero now! */
      /* woke spontaneously and consumed restart signal */
    /* woke spontaneously but did not consume restart---caller must resolve */
  /* woken due to restart signal */

#endif /* __ASSUME_REALTIME_SIGNALS */

void __pthread_restart_new(pthread_descr th)
  /* The barrier is probably not needed, in which case it still documents
     our assumptions. The intent is to commit previous writes to shared
     memory so the woken thread will have a consistent view. Complementary
     read barriers are present in the suspend functions. */
  WRITE_MEMORY_BARRIER();
  kill(th->p_pid, __pthread_sig_restart);

/* There is no __pthread_suspend_new because it would just
   be a wasteful wrapper for __pthread_wait_for_restart_signal */

__pthread_timedsuspend_new(pthread_descr self, const struct timespec *abstime)
  sigset_t unblock, initial_mask;
  int was_signalled = 0;

  if (sigsetjmp(jmpbuf, 1) == 0) {
    THREAD_SETMEM(self, p_signal_jmp, &jmpbuf);
    THREAD_SETMEM(self, p_signal, 0);
    /* Unblock the restart signal */
    sigemptyset(&unblock);
    sigaddset(&unblock, __pthread_sig_restart);
    sigprocmask(SIG_UNBLOCK, &unblock, &initial_mask);

      struct timespec reltime;

      /* Compute a time offset relative to now. */
      __gettimeofday (&now, NULL);
      reltime.tv_nsec = abstime->tv_nsec - now.tv_usec * 1000;
      reltime.tv_sec = abstime->tv_sec - now.tv_sec;
      if (reltime.tv_nsec < 0) {
        reltime.tv_nsec += 1000000000;
        reltime.tv_sec -= 1;

      /* Sleep for the required duration. If woken by a signal,
         resume waiting as required by Single Unix Specification. */
      if (reltime.tv_sec < 0 || __libc_nanosleep(&reltime, NULL) == 0)

    /* Block the restart signal again */
    sigprocmask(SIG_SETMASK, &initial_mask, NULL);

  THREAD_SETMEM(self, p_signal_jmp, NULL);
  /* Now was_signalled is true if we exited the above code
     due to the delivery of a restart signal. In that case,
     everything is cool. We have been removed from whatever
     we were waiting on by the other thread, and consumed its signal.

     Otherwise this thread woke up spontaneously, or due to a signal other
     than restart. This is an ambiguous case that must be resolved by
     the caller; the thread is still eligible for a restart wakeup
     so there is a race. */
  READ_MEMORY_BARRIER(); /* See comment in __pthread_restart_new */
  return was_signalled;
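/* Illustrative sketch (not part of the original file): how a caller such as a
   condition-variable wait is expected to consume the 0/1 result returned
   above. The queue type and the enqueue/remove helpers are hypothetical
   stand-ins; only the handling of the ambiguous 0 return follows the
   contract described in the comment above. */
#if 0
#include <errno.h>

static int
example_timedwait (pthread_descr self, struct hypothetical_queue *q,
                   const struct timespec *abstime)
{
  enqueue (q, self);            /* make ourselves eligible for restart */

  if (__pthread_timedsuspend_new (self, abstime))
    return 0;                   /* restart consumed: we were woken */

  /* Ambiguous wakeup (timeout or unrelated signal). Try to withdraw from
     the queue; if we are no longer on it, a restart is in flight and must
     still be consumed so the resume accounting stays balanced. */
  if (!remove_from_queue (q, self))
    __pthread_wait_for_restart_signal (self);
  return ETIMEDOUT;
}
#endif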
void __pthread_message(char * fmt, ...)
  sprintf(buffer, "%05d : ", __getpid());
  va_start(args, fmt);
  vsnprintf(buffer + 8, sizeof(buffer) - 8, fmt, args);
  TEMP_FAILURE_RETRY(__libc_write(2, buffer, strlen(buffer)));

/* We need a hook to force the cancelation wrappers and file locking
   to be linked in when static libpthread is used. */
extern const int __pthread_provide_wrappers;
static const int *const __pthread_require_wrappers =
  &__pthread_provide_wrappers;
extern const int __pthread_provide_lockfile;
static const int *const __pthread_require_lockfile =
  &__pthread_provide_lockfile;