 * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
 * Copyright (c) 2000-2001 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * Support code for LinuxThreads, the clone()-based kernel
 * thread package for Linux which is included in libc6.
 *
 * This code relies on implementation details of LinuxThreads
 * (i.e., properties not guaranteed by the Pthread standard),
 * though this version now does less of that than the other Pthreads
 * implementations.
 *
 * Note that there is a lot of code duplication between linux_threads.c
 * and thread support for some of the other Posix platforms; any changes
 * made here may need to be reflected there too.
 *
 * Linux_threads.c now also includes some code to support HPUX and
 * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is not yet
 * functional.  The OSF1 code is based on Eric Benson's
 * patch, though that was originally against hpux_irix_threads.  The code
 * here is completely untested.  With 0.0000001% probability, it might
 * actually work.
 *
 * Eric also suggested an alternate basis for a lock implementation in
 * his code:
 * + #elif defined(OSF1)
 * +    unsigned long GC_allocate_lock = 0;
 * +    msemaphore GC_allocate_semaphore;
 * + #  define GC_TRY_LOCK() \
 * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
 * +     ? (GC_allocate_lock = 1) \
 * +     : 0)
 * + #  define GC_LOCK_TAKEN GC_allocate_lock
 */
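/* A minimal sketch (our illustration, not from Eric's patch) of how a	*/
/* blocking GC_lock() might be built on that try-lock primitive.  The	*/
/* blocking msem_lock() call and the ABORT message are assumptions.	*/
#if 0	/* illustrative only; as untested as the OSF1 support itself */
void GC_lock(void)
{
    if (GC_TRY_LOCK()) return;	/* fast path: semaphore was free */
    /* Slow path: a zero condition argument means block until the	*/
    /* semaphore is released.						*/
    if (msem_lock(&GC_allocate_semaphore, 0) != 0)
        ABORT("msem_lock failed");
    GC_allocate_lock = 1;	/* record that we now hold the lock */
}
#endif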
/* #define DEBUG_THREADS 1 */

/* ANSI C requires that a compilation unit contains something */

# if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
     && !defined(GC_IRIX_THREADS)

# include "private/gc_priv.h"

# if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
     && !defined(USE_HPUX_TLS)
#   define USE_HPUX_TLS
# endif
# ifdef THREAD_LOCAL_ALLOC
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_HPUX_TLS)
#     include "private/specific.h"
#   endif
#   if defined(USE_PTHREAD_SPECIFIC)
#     define GC_getspecific pthread_getspecific
#     define GC_setspecific pthread_setspecific
#     define GC_key_create pthread_key_create
      typedef pthread_key_t GC_key_t;
#   endif
#   if defined(USE_HPUX_TLS)
#     define GC_getspecific(x) (x)
#     define GC_setspecific(key, v) ((key) = (v), 0)
#     define GC_key_create(key, d) 0
      typedef void * GC_key_t;
#   endif
# endif
# include <pthread.h>
# include <errno.h>
# include <unistd.h>
# include <sys/mman.h>
# include <sys/time.h>
# include <semaphore.h>
# include <signal.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
#ifdef GC_USE_LD_WRAP
#   define WRAP_FUNC(f) __wrap_##f
#   define REAL_FUNC(f) __real_##f
#else
#   define WRAP_FUNC(f) GC_##f
#   define REAL_FUNC(f) f
#   undef pthread_create
#   undef pthread_sigmask
#   undef pthread_join
#   undef pthread_detach
#endif
void GC_print_sig_mask()
{
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
        ABORT("pthread_sigmask");
    GC_printf0("Blocked: ");
    for (i = 1; i <= MAXSIG; i++) {
        if (sigismember(&blocked, i)) { GC_printf1("%ld ", (long)i); }
    }
    GC_printf0("\n");
}
/* We use the allocation lock to protect thread-related data structures. */

/* The set of all known threads.  We intercept thread creation and	*/
/* joins.  Protected by the allocation/GC lock.				*/
/* Some of this should be declared volatile, but that's inconsistent	*/
/* with some library routine declarations.				*/
typedef struct GC_Thread_Rep {
    struct GC_Thread_Rep * next;  /* More recently allocated threads	*/
                                  /* with a given pthread id come	*/
                                  /* first.  (All but the first are	*/
                                  /* guaranteed to be dead, but we may	*/
                                  /* not yet have registered the join.) */
    pthread_t id;
    short flags;
#       define FINISHED 1       /* Thread has exited.			*/
#       define DETACHED 2       /* Thread is intended to be detached.	*/
#       define MAIN_THREAD 4    /* True for the original thread only.	*/
    short thread_blocked;       /* Protected by GC lock.		*/
                                /* Treated as a boolean value.  If set,	*/
                                /* thread will acquire GC lock before	*/
                                /* doing any pointer manipulations, and	*/
                                /* has set its sp value.  Thus it does	*/
                                /* not need to be sent a signal to stop	*/
                                /* it.					*/
    ptr_t stack_end;            /* Cold end of the stack.		*/
    ptr_t stack_ptr;            /* Valid only when stopped.		*/
#   ifdef IA64
        ptr_t backing_store_end;
        ptr_t backing_store_ptr;
#   endif
    int signal;                 /* Set by the restart handler.		*/
    void * status;              /* The value returned from the thread.	*/
                                /* Used only to avoid premature		*/
                                /* reclamation of any data it might	*/
                                /* reference.				*/
#   ifdef THREAD_LOCAL_ALLOC
#       if CPP_WORDSZ == 64 && defined(ALIGN_DOUBLE)
#           define GRANULARITY 16
#           define NFREELISTS 49
#       else
#           define GRANULARITY 8
#           define NFREELISTS 65
#       endif
        /* The ith free list corresponds to size i*GRANULARITY */
#       define INDEX_FROM_BYTES(n) ((ADD_SLOP(n) + GRANULARITY - 1)/GRANULARITY)
#       define BYTES_FROM_INDEX(i) ((i) * GRANULARITY - EXTRA_BYTES)
#       define SMALL_ENOUGH(bytes) (ADD_SLOP(bytes) <= \
                                    (NFREELISTS-1)*GRANULARITY)
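        /* Worked example (our note; actual values depend on the	*/
        /* configuration): with GRANULARITY 8 and ADD_SLOP/EXTRA_BYTES	*/
        /* both zero, a 20-byte request maps to			*/
        /*     INDEX_FROM_BYTES(20) = (20 + 7)/8 = 3,			*/
        /* and that list is refilled with objects of size		*/
        /*     BYTES_FROM_INDEX(3) = 24 bytes,				*/
        /* i.e. requests are rounded up to the next granule multiple.	*/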
        ptr_t ptrfree_freelists[NFREELISTS];
        ptr_t normal_freelists[NFREELISTS];
#       ifdef GC_GCJ_SUPPORT
          ptr_t gcj_freelists[NFREELISTS];
#       endif
                /* Free lists contain either a pointer or a small count */
                /* reflecting the number of granules allocated at that	*/
                /* size.						*/
                /* 0 ==> thread-local allocation in use, free list	*/
                /*       empty.						*/
                /* > 0, <= DIRECT_GRANULES ==> Using global allocation,	*/
                /*       too few objects of this size have been		*/
                /*       allocated by this thread.			*/
                /* >= HBLKSIZE ==> pointer to nonempty free list.	*/
                /* > DIRECT_GRANULES, < HBLKSIZE ==> transition to	*/
                /*       local alloc, equivalent to 0.			*/
#       define DIRECT_GRANULES (HBLKSIZE/GRANULARITY)
                /* Don't use local free lists for up to this much	*/
                /* allocation.						*/
#   endif
} GC_thread_rep;

typedef struct GC_Thread_Rep * GC_thread;

GC_thread GC_lookup_thread(pthread_t id);
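
/* Illustrative helper (our sketch, not part of the collector):	*/
/* classifying a thread-local free-list entry under the encoding	*/
/* documented above.  The function name is hypothetical.		*/
#if 0
static int GC_fl_entry_kind(ptr_t q)
{
    if (0 == (word)q) return 0;               /* empty; local alloc in use  */
    if ((word)q <= DIRECT_GRANULES) return 1; /* count; still global alloc */
    if ((word)q < HBLKSIZE) return 2;         /* transition to local alloc */
    return 3;                                 /* pointer to nonempty list  */
}
#endif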
static GC_bool parallel_initialized = FALSE;

# if defined(__GNUC__)
    void GC_init_parallel() __attribute__ ((constructor));
# else
    void GC_init_parallel();
# endif

# if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)

/* We don't really support thread-local allocation with DBG_HDRS_ALL. */

GC_key_t GC_thread_key;

static GC_bool keys_initialized;
/* Recover the contents of the freelist array fl into the global one gfl. */
/* Note that the indexing scheme differs, in that gfl has finer size	*/
/* resolution, even if not all entries are used.			*/
/* We hold the allocator lock.						*/
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
        nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;
        q = *qptr;
        if ((word)q < HBLKSIZE) continue;
        if (gfl[nwords] == 0) {
            gfl[nwords] = q;
        } else {
            /* Concatenate: find the tail of fl[i], link it to gfl. */
            for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
            GC_ASSERT(0 == q);
            *qptr = gfl[nwords];
            gfl[nwords] = fl[i];
        }
        /* Clear fl[i], since the thread structure may hang around.	*/
        /* Do it in a way that is likely to trap if we access it.	*/
        fl[i] = (ptr_t)HBLKSIZE;
    }
}
/* We statically allocate a single "size 0" object.  It is linked to	*/
/* itself, and is thus repeatedly reused for all size 0 allocation	*/
/* requests.  (Size 0 gcj allocation requests are incorrect, and	*/
/* we arrange for those to fault asap.)					*/
static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
/* Each thread structure must be initialized.	*/
/* This call must be made from the new thread.	*/
/* Caller holds allocation lock.		*/
void GC_init_thread_local(GC_thread p)
{
    int i;

    if (!keys_initialized) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < NFREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (ptr_t)1;
        p -> normal_freelists[i] = (ptr_t)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (ptr_t)1;
#       endif
    }
    /* Set up the size 0 free lists.	*/
    p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
    p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
#   ifdef GC_GCJ_SUPPORT
      p -> gcj_freelists[0] = (ptr_t)(-1);
#   endif
}
#ifdef GC_GCJ_SUPPORT
  extern ptr_t * GC_gcjobjfreelist;
#endif

/* We hold the allocator lock.	*/
void GC_destroy_thread_local(GC_thread p)
{
    /* We currently only do this from the thread itself. */
    GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
extern GC_PTR GC_generic_malloc_many();

GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl;
        ptr_t my_entry;
        GC_key_t k = GC_thread_key;
        void * tsd;

#       if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
           || !defined(__GNUC__)
            if (EXPECT(0 == k, 0)) {
                /* This can happen if we get called when the world is	*/
                /* being initialized.  Whether we can actually complete	*/
                /* the initialization then is unclear.			*/
                GC_init_parallel();
                k = GC_thread_key;
            }
#       endif
        tsd = GC_getspecific(GC_thread_key);
#       ifdef GC_ASSERTIONS
          LOCK();
          GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
          UNLOCK();
#       endif
        my_fl = ((GC_thread)tsd) -> normal_freelists + index;
        my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            ptr_t next = obj_link(my_entry);
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = next;
            obj_link(my_entry) = 0;
            PREFETCH_FOR_WRITE(next);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc(bytes);
        }
    }
}
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> ptrfree_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            *my_fl = obj_link(my_entry);
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_malloc_atomic(bytes);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
            /* *my_fl is updated while the collector is excluded;	*/
            /* the free list is always visible to the collector as	*/
            /* such.							*/
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_malloc_atomic(bytes);
        }
    }
}
#ifdef GC_GCJ_SUPPORT

#include "include/gc_gcj.h"

extern GC_bool GC_gcj_malloc_initialized;

extern int GC_gcj_kind;

GC_PTR GC_local_gcj_malloc(size_t bytes,
                           void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
        int index = INDEX_FROM_BYTES(bytes);
        ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
                        -> gcj_freelists + index;
        ptr_t my_entry = *my_fl;
        if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
            GC_PTR result = (GC_PTR)my_entry;
            GC_ASSERT(!GC_incremental);
            /* We assert that any concurrent marker will stop us.	*/
            /* Thus it is impossible for a mark procedure to see the	*/
            /* allocation of the next object, but to see this object	*/
            /* still containing a free list pointer.  Otherwise the	*/
            /* marker might find a random "mark descriptor".		*/
            *(volatile ptr_t *)my_fl = obj_link(my_entry);
            /* We must update the freelist before we store the pointer.	*/
            /* Otherwise a GC at this point would see a corrupted	*/
            /* free list.						*/
            /* A memory barrier is probably never needed, since the	*/
            /* action of stopping this thread will cause prior writes	*/
            /* to complete.						*/
            *(void * volatile *)result = ptr_to_struct_containing_descr;
            return result;
        } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = my_entry + index + 1;
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        } else {
            GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
            if (*my_fl == 0) return GC_oom_fn(bytes);
            return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
        }
    }
}

#endif /* GC_GCJ_SUPPORT */
# else  /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */

#   define GC_destroy_thread_local(t)

# endif /* !THREAD_LOCAL_ALLOC */
/*
 * We use signals to stop threads during GC.
 *
 * Suspended threads wait in signal handler for SIG_THR_RESTART.
 * That's more portable than semaphores or condition variables.
 * (We do use sem_post from a signal handler, but that should be portable.)
 *
 * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
 * Note that we can't just stop a thread; we need it to save its stack
 * pointer(s) and acknowledge.
 */
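
/* The handshake, in outline (a summary of the code below):		*/
/*   stopper: sends SIG_SUSPEND to every registered thread, then	*/
/*            does one sem_wait(&GC_suspend_ack_sem) per signal sent.	*/
/*   target:  the suspend handler records the stack pointer,		*/
/*            sem_post()s the ack semaphore, and sigsuspend()s until	*/
/*            SIG_THR_RESTART arrives.					*/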
#ifndef SIG_THR_RESTART
# if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS)
#   define SIG_THR_RESTART _SIGRTMIN + 5
# else
#   define SIG_THR_RESTART SIGXCPU
# endif
#endif

sem_t GC_suspend_ack_sem;
#if 0
/*
To make sure that we're using LinuxThreads and not some other thread
package, we generate a dummy reference to `pthread_kill_other_threads_np'
(was `__pthread_initial_thread_bos' but that disappeared),
which is a symbol defined in LinuxThreads, but (hopefully) not in other
thread packages.

We no longer do this, since this code is now portable enough that it might
actually work for something else.
*/
void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
#endif /* 0 */
#if defined(SPARC) || defined(IA64)
  extern word GC_save_regs_in_stack();
#endif

long GC_nprocs = 1;     /* Number of processors.  We may not have	*/
                        /* access to all of them, but this is as good	*/
                        /* a guess as any ...				*/
#ifdef PARALLEL_MARK

# define MAX_MARKERS 16

static ptr_t marker_sp[MAX_MARKERS] = {0};

void * GC_mark_thread(void * id)
{
  word my_mark_no = 0;

  marker_sp[(word)id] = GC_approx_sp();
  for (;; ++my_mark_no) {
    /* GC_mark_no is passed only to allow GC_help_marker to terminate	*/
    /* promptly.  This is important if it were called from the signal	*/
    /* handler or from the GC lock acquisition code.  Under Linux, it's	*/
    /* not safe to call it from a signal handler, since it uses mutexes	*/
    /* and condition variables.  Since it is called only here, the	*/
    /* argument is unnecessary.						*/
    if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
        /* resynchronize if we get far off, e.g. because GC_mark_no	*/
        /* wrapped.							*/
        my_mark_no = GC_mark_no;
    }
#   ifdef DEBUG_THREADS
      GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
#   endif
    GC_help_marker(my_mark_no);
  }
}
extern long GC_markers;         /* Number of mark threads we would	*/
                                /* like to have.  Includes the		*/
                                /* initiating thread.			*/

pthread_t GC_mark_threads[MAX_MARKERS];

#define PTHREAD_CREATE REAL_FUNC(pthread_create)

static void start_mark_threads()
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   ifdef HPUX
      /* Default stack size is usually too small: fix it.	*/
      /* Otherwise marker threads or GC may run out of		*/
      /* space.							*/
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
      }
#   endif
    for (i = 0; i < GC_markers - 1; ++i) {
        if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                                GC_mark_thread, (void *)(word)i)) {
            WARN("Marker thread creation failed, errno = %ld.\n", errno);
        }
    }
}
#else  /* !PARALLEL_MARK */

static __inline__ void start_mark_threads()
{
}

#endif /* !PARALLEL_MARK */
void GC_suspend_handler(int sig)
{
    int dummy;
    pthread_t my_thread = pthread_self();
    GC_thread me;
    sigset_t mask;
#   ifdef PARALLEL_MARK
        word my_mark_no = GC_mark_no;
        /* Marker can't proceed until we acknowledge.  Thus this is	*/
        /* guaranteed to be the mark_no corresponding to our		*/
        /* suspension, i.e. the marker can't have incremented it yet.	*/
#   endif

    if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");

#   ifdef DEBUG_THREADS
      GC_printf1("Suspending 0x%x\n", my_thread);
#   endif

    me = GC_lookup_thread(my_thread);
    /* The lookup here is safe, since I'm doing this on behalf	*/
    /* of a thread which holds the allocation lock in order	*/
    /* to stop the world.  Thus concurrent modification of the	*/
    /* data structure is impossible.				*/
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)(&dummy);
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack();
#   endif

    /* Tell the thread that wants to stop the world that this	*/
    /* thread has been stopped.  Note that sem_post() is	*/
    /* the only async-signal-safe primitive in LinuxThreads.	*/
    sem_post(&GC_suspend_ack_sem);

    /* Wait until that thread tells us to restart by sending	*/
    /* this thread a SIG_THR_RESTART signal.			*/
    /* SIG_THR_RESTART should be masked at this point.  Thus	*/
    /* there is no race.					*/
    if (sigfillset(&mask) != 0) ABORT("sigfillset() failed");
    if (sigdelset(&mask, SIG_THR_RESTART) != 0) ABORT("sigdelset() failed");
#   ifdef NO_SIGNALS
      if (sigdelset(&mask, SIGINT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGQUIT) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGTERM) != 0) ABORT("sigdelset() failed");
      if (sigdelset(&mask, SIGABRT) != 0) ABORT("sigdelset() failed");
#   endif
    do {
        me->signal = 0;
        sigsuspend(&mask);      /* Wait for signal */
    } while (me->signal != SIG_THR_RESTART);

#   ifdef DEBUG_THREADS
      GC_printf1("Continuing 0x%x\n", my_thread);
#   endif
}
void GC_restart_handler(int sig)
{
    GC_thread me;

    if (sig != SIG_THR_RESTART) ABORT("Bad signal in restart_handler");

    /* Let the GC_suspend_handler() know that we got a SIG_THR_RESTART. */
    /* The lookup here is safe, since I'm doing this on behalf		*/
    /* of a thread which holds the allocation lock in order		*/
    /* to stop the world.  Thus concurrent modification of the		*/
    /* data structure is impossible.					*/
    me = GC_lookup_thread(pthread_self());
    me->signal = SIG_THR_RESTART;

    /*
    ** Note: even if we didn't do anything useful here,
    ** it would still be necessary to have a signal handler,
    ** rather than ignoring the signals, otherwise
    ** the signals will not be delivered at all, and
    ** will thus not interrupt the sigsuspend() above.
    */

#   ifdef DEBUG_THREADS
      GC_printf1("In GC_restart_handler for 0x%x\n", pthread_self());
#   endif
}
/* Defining INSTALL_LOOPING_SEGV_HANDLER causes SIGSEGV and SIGBUS to	*/
/* result in an infinite loop in a signal handler.  This can be very	*/
/* useful for debugging, since (as of RH7) gdb still seems to have	*/
/* serious problems with threads.					*/
#ifdef INSTALL_LOOPING_SEGV_HANDLER
void GC_looping_handler(int sig)
{
    GC_printf3("Signal %ld in thread %lx, pid %ld\n",
               sig, pthread_self(), getpid());
    for (;;);
}
#endif
GC_bool GC_thr_initialized = FALSE;

# define THREAD_TABLE_SZ 128    /* Must be power of 2 */
volatile GC_thread GC_threads[THREAD_TABLE_SZ];
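
/* Note (ours): with a power-of-2 table size, the hash		*/
/* ((word)id) % THREAD_TABLE_SZ used below compiles down to a	*/
/* cheap mask of the low bits of the pthread id.		*/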
void GC_push_thread_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
}
#ifdef THREAD_LOCAL_ALLOC
/* We must explicitly mark ptrfree and gcj free lists, since the free	*/
/* list links wouldn't otherwise be found.  We also set them in the	*/
/* normal free lists, since that involves touching less memory than if	*/
/* we scanned them normally.						*/
void GC_mark_thread_local_free_lists(void)
{
    int i, j;
    GC_thread p;
    ptr_t q;

    for (i = 0; i < THREAD_TABLE_SZ; ++i) {
      for (p = GC_threads[i]; 0 != p; p = p -> next) {
        for (j = 1; j < NFREELISTS; ++j) {
          q = p -> ptrfree_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
          q = p -> normal_freelists[j];
          if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         ifdef GC_GCJ_SUPPORT
            q = p -> gcj_freelists[j];
            if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
#         endif /* GC_GCJ_SUPPORT */
        }
      }
    }
}
#endif /* THREAD_LOCAL_ALLOC */
/* Add a thread to GC_threads.  We assume it wasn't already there.	*/
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;

    if (!first_thread_used) {
        result = &first_thread;
        first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
                 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
/* Caller holds allocation lock.				*/
void GC_delete_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (!pthread_equal(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* If a thread has been joined, but we have not yet		*/
/* been notified, then there may be more than one thread	*/
/* in the table with the same pthread id.			*/
/* This is OK, but we need a way to delete a specific one.	*/
void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    while (p != gc_id) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
}
/* Return a GC_thread corresponding to a given thread_t.	*/
/* Returns 0 if it's not there.					*/
/* Caller holds allocation lock or otherwise inhibits		*/
/* updates.							*/
/* If there is more than one thread with the given id we	*/
/* return the most recent one.					*/
GC_thread GC_lookup_thread(pthread_t id)
{
    int hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];

    while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
    return(p);
}
/* There seems to be a very rare thread stopping problem.  To help us	*/
/* debug that, we save the ids of the stopping thread.			*/
pthread_t GC_stopping_thread;
int GC_stopping_pid;

/* Caller holds allocation lock.	*/
void GC_stop_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int n_live_threads = 0;
    register int result;

    GC_stopping_thread = my_thread;    /* debugging only.	*/
    GC_stopping_pid = getpid();        /* debugging only.	*/
    /* Make sure all free list construction has stopped before we start. */
    /* No new construction can start, since free list construction is	*/
    /* required to acquire and release the GC lock before it starts,	*/
    /* and we have the lock.						*/
#   ifdef PARALLEL_MARK
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
#   endif /* PARALLEL_MARK */
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
            n_live_threads++;
#           ifdef DEBUG_THREADS
              GC_printf1("Sending suspend signal to 0x%x\n", p -> id);
#           endif
            result = pthread_kill(p -> id, SIG_SUSPEND);
            switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible? */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
            }
        }
      }
    }
    for (i = 0; i < n_live_threads; i++) {
        if (0 != sem_wait(&GC_suspend_ack_sem))
            ABORT("sem_wait in handler failed");
    }
#   ifdef PARALLEL_MARK
      GC_release_mark_lock();
#   endif
#   ifdef DEBUG_THREADS
      GC_printf1("World stopped 0x%x\n", pthread_self());
#   endif
    GC_stopping_thread = 0;  /* debugging only */
}
/* Caller holds allocation lock, and has held it continuously since	*/
/* the world stopped.							*/
void GC_start_world()
{
    pthread_t my_thread = pthread_self();
    register int i;
    register GC_thread p;
    register int result;

#   ifdef DEBUG_THREADS
      GC_printf0("World starting\n");
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> id != my_thread) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
#           ifdef DEBUG_THREADS
              GC_printf1("Sending restart signal to 0x%x\n", p -> id);
#           endif
            result = pthread_kill(p -> id, SIG_THR_RESTART);
            if (result != 0 && result != ESRCH) {
                /* ESRCH: not really there anymore.  Possible? */
                ABORT("pthread_kill failed");
            }
        }
      }
    }
#   ifdef DEBUG_THREADS
      GC_printf0("World started\n");
#   endif
    GC_stopping_thread = 0;  /* debugging only */
}
#ifdef IA64
#   define IF_IA64(x) x
#else
#   define IF_IA64(x)
#endif

/* We hold allocation lock.  Should do exactly the right thing if the	*/
/* world is stopped.  Should not fail if it isn't.			*/
void GC_push_all_stacks()
{
    int i;
    GC_thread p;
    ptr_t sp = GC_approx_sp();
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t me = pthread_self();

    if (!GC_thr_initialized) GC_thr_init();
#   ifdef DEBUG_THREADS
      GC_printf1("Pushing stacks from thread 0x%lx\n", (unsigned long) me);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        if (pthread_equal(p -> id, me)) {
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = sp;
#           endif
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end;)
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#       ifdef DEBUG_THREADS
          GC_printf3("Stack for thread 0x%lx = [%lx,%lx)\n",
                     (unsigned long) p -> id,
                     (unsigned long) lo, (unsigned long) hi);
#       endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
#       ifdef STACK_GROWS_UP
          /* We got them backwards! */
          GC_push_all_stack(hi, lo);
#       else
          GC_push_all_stack(lo, hi);
#       endif
#       ifdef IA64
          if (pthread_equal(p -> id, me)) {
            GC_push_all_eager(bs_lo, bs_hi);
          } else {
            GC_push_all_stack(bs_lo, bs_hi);
          }
#       endif
      }
    }
}
#ifdef USE_PROC_FOR_LIBRARIES
int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;

#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
        if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (0 != p -> stack_end) {
#         ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
#         else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
#         endif
        }
      }
    }
    return 0;
}
#endif /* USE_PROC_FOR_LIBRARIES */
#ifdef GC_LINUX_THREADS
/* Return the number of processors, or <= 0 if it can't be determined.	*/
int GC_get_nprocs()
{
    /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that	*/
    /* appears to be buggy in many cases.				*/
    /* We look for lines "cpu<n>" in /proc/stat.			*/
#   define STAT_BUF_SIZE 4096
#   if defined(GC_USE_LD_WRAP)
#       define STAT_READ __real_read
#   else
#       define STAT_READ read
#   endif
    char stat_buf[STAT_BUF_SIZE];
    int f;
    int i, len;
    int result = 1;
        /* Some old kernels only have a single "cpu nnnn ..."	*/
        /* entry in /proc/stat.  We identify those as		*/
        /* uniprocessors.					*/

    f = open("/proc/stat", O_RDONLY);
    if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
        WARN("Couldn't read /proc/stat\n", 0);
        return -1;
    }
    for (i = 0; i < len - 100; ++i) {
        if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
            && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
            int cpu_no = atoi(stat_buf + i + 4);
            if (cpu_no >= result) result = cpu_no + 1;
        }
    }
    close(f);
    return result;
}
#endif /* GC_LINUX_THREADS */
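
/* Illustrative note (ours): the scan above counts "cpu<n>" lines that	*/
/* follow a newline, so a /proc/stat beginning				*/
/*     cpu  3357 0 4313 1362393						*/
/*     cpu0 1680 0 2150 681005						*/
/*     cpu1 1677 0 2163 681388						*/
/* yields 2 (cpu1 sets result to 1 + 1), while an old single-"cpu"	*/
/* kernel falls through to the initial result of 1.			*/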
/* We hold the allocation lock.	*/
void GC_thr_init()
{
    int dummy;
    GC_thread t;
    struct sigaction act;

    if (GC_thr_initialized) return;
    GC_thr_initialized = TRUE;

    if (sem_init(&GC_suspend_ack_sem, 0, 0) != 0)
        ABORT("sem_init failed");

    act.sa_flags = SA_RESTART;
    if (sigfillset(&act.sa_mask) != 0) {
        ABORT("sigfillset() failed");
    }
#   ifdef NO_SIGNALS
      if (sigdelset(&act.sa_mask, SIGINT) != 0
          || sigdelset(&act.sa_mask, SIGQUIT) != 0
          || sigdelset(&act.sa_mask, SIGABRT) != 0
          || sigdelset(&act.sa_mask, SIGTERM) != 0) {
        ABORT("sigdelset() failed");
      }
#   endif

    /* SIG_THR_RESTART is unmasked by the handler when necessary.	*/
    act.sa_handler = GC_suspend_handler;
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }
#   ifdef INSTALL_LOOPING_SEGV_HANDLER
      act.sa_handler = GC_looping_handler;
      if (sigaction(SIGSEGV, &act, NULL) != 0
          || sigaction(SIGBUS, &act, NULL) != 0) {
        ABORT("Cannot set SIGSEGV or SIGBUS looping handler");
      }
#   endif /* INSTALL_LOOPING_SEGV_HANDLER */

    /* Add the initial thread, so we can stop it.	*/
    t = GC_new_thread(pthread_self());
    t -> stack_ptr = (ptr_t)(&dummy);
    t -> flags = DETACHED | MAIN_THREAD;
    /* Set GC_nprocs.	*/
    {
        char * nprocs_string = GETENV("GC_NPROCS");
        GC_nprocs = -1;
        if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
    }
    if (GC_nprocs <= 0) {
#     if defined(GC_HPUX_THREADS)
        GC_nprocs = pthread_num_processors_np();
#     endif
#     if defined(GC_OSF1_THREADS) || defined(GC_FREEBSD_THREADS)
        GC_nprocs = 1;
#     endif
#     if defined(GC_LINUX_THREADS)
        GC_nprocs = GC_get_nprocs();
#     endif
    }
    if (GC_nprocs <= 0) {
        WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
        GC_nprocs = 2;
#       ifdef PARALLEL_MARK
          GC_markers = 1;
#       endif
    } else {
#       ifdef PARALLEL_MARK
          GC_markers = GC_nprocs;
#       endif
    }
#   ifdef PARALLEL_MARK
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf2("Number of processors = %ld, "
                     "number of marker threads = %ld\n",
                     GC_nprocs, GC_markers);
        }
#     endif
      if (GC_markers == 1) {
        GC_parallel = FALSE;
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Single marker thread, turning off parallel marking\n");
          }
#       endif
      }
#   endif
}
/* Perform all initializations, including those that	*/
/* may require allocation.				*/
/* Called as constructor without allocation lock.	*/
/* Must be called before a second thread is created.	*/
void GC_init_parallel()
{
    if (parallel_initialized) return;
    parallel_initialized = TRUE;
    /* GC_init() calls us back, so set flag first.	*/
    if (!GC_is_initialized) GC_init();
    /* If we are using a parallel marker, start the helper threads. */
#   ifdef PARALLEL_MARK
      if (GC_parallel) start_mark_threads();
#   endif
    /* Initialize thread local free lists if used.	*/
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(GC_lookup_thread(pthread_self()));
      UNLOCK();
#   endif
}
int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
{
    sigset_t fudged_set;

    if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
        fudged_set = *set;
        sigdelset(&fudged_set, SIG_SUSPEND);
        set = &fudged_set;
    }
    return(REAL_FUNC(pthread_sigmask)(how, set, oset));
}
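
/* Example (our illustration; client_code is a hypothetical name):	*/
/* even if a client blocks "all" signals, the wrapper above keeps	*/
/* SIG_SUSPEND deliverable, so the collector can still stop the thread. */
#if 0
void client_code(void)
{
    sigset_t all;

    sigfillset(&all);
    pthread_sigmask(SIG_SETMASK, &all, NULL);   /* intercepted above */
    /* SIG_SUSPEND has been quietly removed from the new mask. */
}
#endif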
/* Wrappers for functions that are likely to block for an appreciable	*/
/* length of time.  Must be called in pairs, if at all.			*/
/* Nothing much beyond the system call itself should be executed	*/
/* between these.							*/
void GC_start_blocking(void) {
#   define SP_SLOP 128
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(!(me -> thread_blocked));
#   ifdef SPARC
        me -> stack_ptr = (ptr_t)GC_save_regs_in_stack();
#   else
        me -> stack_ptr = (ptr_t)GC_approx_sp();
#   endif
#   ifdef IA64
        me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
#   endif
    /* Add some slop to the stack pointer, since the wrapped call may	*/
    /* end up pushing more callee-save registers.			*/
#   ifdef STACK_GROWS_UP
        me -> stack_ptr += SP_SLOP;
#   else
        me -> stack_ptr -= SP_SLOP;
#   endif
    me -> thread_blocked = TRUE;
    UNLOCK();
}
void GC_end_blocking(void) {
    GC_thread me;

    LOCK();   /* This will block if the world is stopped. */
    me = GC_lookup_thread(pthread_self());
    GC_ASSERT(me -> thread_blocked);
    me -> thread_blocked = FALSE;
    UNLOCK();
}
/* A wrapper for the standard C sleep function.	*/
int WRAP_FUNC(sleep) (unsigned int seconds)
{
    int result;

    GC_start_blocking();
    result = REAL_FUNC(sleep)(seconds);
    GC_end_blocking();
    return result;
}
struct start_info {
    void *(*start_routine)(void *);
    void *arg;
    word flags;
    sem_t registered;           /* 1 ==> in our thread table, but	*/
                                /* parent hasn't yet noticed.		*/
};
/* Called at thread exit.				*/
/* Never called for main thread.  That's OK, since it	*/
/* results in at most a tiny one-time leak.  And	*/
/* linuxthreads doesn't reclaim the main thread's	*/
/* resources or id anyway.				*/
void GC_thread_exit_proc(void *arg)
{
    GC_thread me;

    LOCK();
    me = GC_lookup_thread(pthread_self());
    GC_destroy_thread_local(me);
    if (me -> flags & DETACHED) {
        GC_delete_thread(pthread_self());
    } else {
        me -> flags |= FINISHED;
    }
#   if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
       && !defined(USE_HPUX_TLS) && !defined(DBG_HDRS_ALL)
      GC_remove_specific(GC_thread_key);
#   endif
    if (GC_incremental && GC_collection_in_progress()) {
        int old_gc_no = GC_gc_no;

        /* Make sure that no part of our stack is still on the mark	*/
        /* stack, since it's about to be unmapped.			*/
        while (GC_incremental && GC_collection_in_progress()
               && old_gc_no == GC_gc_no) {
            ENTER_GC();
            GC_collect_a_little_inner(1);
            EXIT_GC();
            UNLOCK();
            sched_yield();
            LOCK();
        }
    }
    UNLOCK();
}
int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    /* This is guaranteed to be the intended one, since the thread id	*/
    /* can't have been recycled by pthreads.				*/
    UNLOCK();
    result = REAL_FUNC(pthread_join)(thread, retval);
    if (result == 0) {
        LOCK();
        /* Here the pthread thread id may have been recycled. */
        GC_delete_gc_thread(thread, thread_gc_id);
        UNLOCK();
    }
    return result;
}
int
WRAP_FUNC(pthread_detach)(pthread_t thread)
{
    int result;
    GC_thread thread_gc_id;

    LOCK();
    thread_gc_id = GC_lookup_thread(thread);
    UNLOCK();
    result = REAL_FUNC(pthread_detach)(thread);
    if (result == 0) {
        LOCK();
        thread_gc_id -> flags |= DETACHED;
        /* Here the pthread thread id may have been recycled. */
        if (thread_gc_id -> flags & FINISHED) {
            GC_delete_gc_thread(thread, thread_gc_id);
        }
        UNLOCK();
    }
    return result;
}
void * GC_start_routine(void * arg)
{
    int dummy;
    struct start_info * si = arg;
    void * result;
    GC_thread me;
    pthread_t my_pthread;
    void *(*start)(void *);
    void *start_arg;

    my_pthread = pthread_self();
#   ifdef DEBUG_THREADS
      GC_printf1("Starting thread 0x%lx\n", my_pthread);
      GC_printf1("pid = %ld\n", (long) getpid());
      GC_printf1("sp = 0x%lx\n", (long) &arg);
#   endif
    LOCK();
    me = GC_new_thread(my_pthread);
    me -> flags = si -> flags;
    me -> stack_ptr = 0;
    /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99)	*/
    /* doesn't work because the stack base in /proc/self/stat is the	*/
    /* one for the main thread.  There is a strong argument that that's	*/
    /* a kernel bug, but a pervasive one.				*/
#   ifdef STACK_GROWS_DOWN
      me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
                                & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end - 0x10;
        /* Needs to be plausible, since an asynchronous stack mark	*/
        /* should not crash.						*/
#   else
      me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
      me -> stack_ptr = me -> stack_end + 0x10;
#   endif
    /* This is dubious, since we may be more than a page into the stack, */
    /* and hence skip some of it, though it's not clear that matters.	  */
#   ifdef IA64
      me -> backing_store_end = (ptr_t)
                (GC_save_regs_in_stack() & ~(GC_page_size - 1));
      /* This is also < 100% convincing.  We should also read this	*/
      /* from /proc, but the hook to do so isn't there yet.		*/
#   endif /* IA64 */
    UNLOCK();
    start = si -> start_routine;
#   ifdef DEBUG_THREADS
      GC_printf1("start_routine = 0x%lx\n", start);
#   endif
    start_arg = si -> arg;
    sem_post(&(si -> registered));      /* Last action on si.	*/
                                        /* OK to deallocate.	*/
    pthread_cleanup_push(GC_thread_exit_proc, 0);
#   if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
      LOCK();
      GC_init_thread_local(me);
      UNLOCK();
#   endif
    result = (*start)(start_arg);
#   ifdef DEBUG_THREADS
      GC_printf1("Finishing thread 0x%x\n", pthread_self());
#   endif
    me -> status = result;
    me -> flags |= FINISHED;
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit		*/
    /* while a collection that thinks we're alive is trying to stop	*/
    /* us.								*/
    return(result);
}
int
WRAP_FUNC(pthread_create)(pthread_t *new_thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *), void *arg)
{
    int result;
    int detachstate;
    word my_flags = 0;
    struct start_info * si;
        /* This is otherwise saved only in an area mmapped by the thread */
        /* library, which isn't visible to the collector.		  */

    LOCK();
    si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
                                                 NORMAL);
    UNLOCK();
    if (!parallel_initialized) GC_init_parallel();
    if (0 == si) return(ENOMEM);
    sem_init(&(si -> registered), 0, 0);
    si -> start_routine = start_routine;
    si -> arg = arg;
    LOCK();
    if (!GC_thr_initialized) GC_thr_init();
    if (NULL == attr) {
        detachstate = PTHREAD_CREATE_JOINABLE;
    } else {
        pthread_attr_getdetachstate(attr, &detachstate);
    }
    if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    si -> flags = my_flags;
    UNLOCK();
#   ifdef DEBUG_THREADS
      GC_printf1("About to start new thread from thread 0x%X\n",
                 pthread_self());
#   endif
    result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
#   ifdef DEBUG_THREADS
      GC_printf1("Started thread 0x%X\n", *new_thread);
#   endif
    /* Wait until child has been added to the thread table.		*/
    /* This also ensures that we hold onto si until the child is done	*/
    /* with it.  Thus it doesn't matter whether it is otherwise	*/
    /* visible to the collector.					*/
    while (0 != sem_wait(&(si -> registered))) {
        if (EINTR != errno) ABORT("sem_wait failed");
    }
    sem_destroy(&(si -> registered));
    LOCK();
    GC_INTERNAL_FREE(si);
    UNLOCK();
    return(result);
}
#ifdef GENERIC_COMPARE_AND_SWAP
  pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;

  GC_bool GC_compare_and_exchange(volatile GC_word *addr,
                                  GC_word old, GC_word new_val)
  {
    GC_bool result;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    if (*addr == old) {
      *addr = new_val;
      result = TRUE;
    } else {
      result = FALSE;
    }
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return result;
  }

  GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
  {
    GC_word old;

    pthread_mutex_lock(&GC_compare_and_swap_lock);
    old = *addr;
    *addr = old + how_much;
    pthread_mutex_unlock(&GC_compare_and_swap_lock);
    return old;
  }
#endif /* GENERIC_COMPARE_AND_SWAP */
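
/* Illustrative use (our sketch, not collector code) of the emulated	*/
/* primitives above: the classic compare-and-swap increment loop.	*/
/* The names counter and incr_counter are hypothetical.		*/
#if 0
static volatile GC_word counter = 0;

static void incr_counter(void)
{
    GC_word old;

    do {
        old = counter;
    } while (!GC_compare_and_exchange(&counter, old, old + 1));
    /* Equivalently: (void)GC_atomic_add(&counter, 1); */
}
#endif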
/* Spend a few cycles in a way that can't introduce contention with	*/
/* other threads.							*/
void GC_pause()
{
    int i;
    volatile word dummy = 0;

    for (i = 0; i < 10; ++i) {
#     ifdef __GNUC__
        __asm__ __volatile__ (" " : : : "memory");
#     else
        /* Something that's unlikely to be optimized away. */
        dummy += i;
#     endif
    }
}

#define SPIN_MAX 1024   /* Maximum number of calls to GC_pause before	*/
                        /* we give up.					*/

VOLATILE GC_bool GC_collecting = 0;
                        /* A hint that we're in the collector and	*/
                        /* holding the allocation lock for an		*/
                        /* extended period.				*/
#if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
/* If we don't want to use the below spinlock implementation, either	*/
/* because we don't have a GC_test_and_set implementation, or because	*/
/* we don't want to risk sleeping, we can still try spinning on	*/
/* pthread_mutex_trylock for a while.  This appears to be very		*/
/* beneficial in many cases.						*/
/* I suspect that under high contention this is nearly always better	*/
/* than the spin lock.  But it's a bit slower on a uniprocessor.	*/
/* Hence we still default to the spin lock.				*/
/* This is also used to acquire the mark lock for the parallel		*/
/* marker.								*/

/* Here we use a strict exponential backoff scheme.  I don't know	*/
/* whether that's better or worse than the above.  We eventually	*/
/* yield by calling pthread_mutex_lock(); it never makes sense to	*/
/* explicitly sleep.							*/
void GC_generic_lock(pthread_mutex_t * lock)
{
    unsigned pause_length = 1;
    unsigned i;

    if (0 == pthread_mutex_trylock(lock)) return;
    for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
        for (i = 0; i < pause_length; ++i) {
            GC_pause();
        }
        switch(pthread_mutex_trylock(lock)) {
            case 0:
                return;
            case EBUSY:
                break;
            default:
                ABORT("Unexpected error from pthread_mutex_trylock");
        }
    }
    pthread_mutex_lock(lock);
}

#endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
#if defined(USE_SPIN_LOCK)

/* Reasonably fast spin locks.  Basically the same implementation	*/
/* as STL alloc.h.  This isn't really the right way to do this,		*/
/* but until the POSIX scheduling mess gets straightened out ...	*/

volatile unsigned int GC_allocate_lock = 0;

void GC_lock()
{
#   define low_spin_max 30      /* spin cycles if we suspect uniprocessor */
#   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor	  */
    static unsigned spin_max = low_spin_max;
    unsigned my_spin_max;
    static unsigned last_spins = 0;
    unsigned my_last_spins;
    int i;

    if (!GC_test_and_set(&GC_allocate_lock)) {
        return;
    }
    my_spin_max = spin_max;
    my_last_spins = last_spins;
    for (i = 0; i < my_spin_max; i++) {
        if (GC_collecting || GC_nprocs == 1) goto yield;
        if (i < my_last_spins/2 || GC_allocate_lock) {
            GC_pause();
            continue;
        }
        if (!GC_test_and_set(&GC_allocate_lock)) {
            /*
             * Spinning worked.  Thus we're probably not being scheduled
             * against the other process with which we were contending.
             * Thus it makes sense to spin longer the next time.
             */
            last_spins = i;
            spin_max = high_spin_max;
            return;
        }
    }
    /* We are probably being scheduled against the other process.  Sleep. */
    spin_max = low_spin_max;
yield:
    for (i = 0;; ++i) {
        if (!GC_test_and_set(&GC_allocate_lock)) {
            return;
        }
#       define SLEEP_THRESHOLD 12
            /* nanosleep(<= 2ms) just spins under Linux.  We	*/
            /* want to be careful to avoid that behavior.	*/
        if (i < SLEEP_THRESHOLD) {
            sched_yield();
        } else {
            struct timespec ts;

            if (i > 24) i = 24;
                /* Don't wait for more than about 15msecs, even	*/
                /* under extreme contention.			*/
            ts.tv_sec = 0;
            ts.tv_nsec = 1 << i;
            nanosleep(&ts, 0);
        }
    }
}
#else  /* !USE_SPIN_LOCK */

void GC_lock()
{
    if (1 == GC_nprocs || GC_collecting) {
        pthread_mutex_lock(&GC_allocate_ml);
    } else {
        GC_generic_lock(&GC_allocate_ml);
    }
}

#endif /* !USE_SPIN_LOCK */
#if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)

#ifdef GC_ASSERTIONS
  pthread_t GC_mark_lock_holder = NO_THREAD;
#endif

#if 0
  /* Ugly workaround for a linux threads bug in the final versions	*/
  /* of glibc2.1.  Pthread_mutex_trylock sets the mutex owner		*/
  /* field even when it fails to acquire the mutex.  This causes	*/
  /* pthread_cond_wait to die.  Remove for glibc2.2.			*/
  /* According to the man page, we should use				*/
  /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually	*/
  /* defined.								*/
  static pthread_mutex_t mark_mutex =
        {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
#else
  static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
void GC_acquire_mark_lock()
{
/*
    if (pthread_mutex_lock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_lock failed");
    }
*/
    GC_generic_lock(&mark_mutex);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_release_mark_lock()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_mutex_unlock(&mark_mutex) != 0) {
        ABORT("pthread_mutex_unlock failed");
    }
}
/* Collector must wait for free-list builders for 2 reasons:		*/
/* 1) Mark bits may still be getting examined without lock.		*/
/* 2) Partial free lists referenced only by locals may not be scanned	*/
/*    correctly, e.g. if they contain "pointer-free" objects, since the	*/
/*    free-list link may be ignored.					*/
void GC_wait_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_wait_for_reclaim()
{
    GC_acquire_mark_lock();
    while (GC_fl_builder_count > 0) {
        GC_wait_builder();
    }
    GC_release_mark_lock();
}

void GC_notify_all_builder()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
    if (pthread_cond_broadcast(&builder_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}
#endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */

#ifdef PARALLEL_MARK

static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;

void GC_wait_marker()
{
    GC_ASSERT(GC_mark_lock_holder == pthread_self());
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = NO_THREAD;
#   endif
    if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
        ABORT("pthread_cond_wait failed");
    }
    GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
#   ifdef GC_ASSERTIONS
        GC_mark_lock_holder = pthread_self();
#   endif
}

void GC_notify_all_marker()
{
    if (pthread_cond_broadcast(&mark_cv) != 0) {
        ABORT("pthread_cond_broadcast failed");
    }
}

#endif /* PARALLEL_MARK */

# endif /* GC_LINUX_THREADS and friends */