1 /* go-go.c -- the go function.
3 Copyright 2009 The Go Authors. All rights reserved.
4 Use of this source code is governed by a BSD-style
5 license that can be found in the LICENSE file. */
13 #include <semaphore.h>
16 #include "go-assert.h"
23 #ifdef USING_SPLIT_STACK
24 /* FIXME: This is not declared anywhere. */
25 extern void *__splitstack_find (void *, void *, size_t *, void **, void **,
29 /* We stop the threads by sending them the signal GO_SIG_STOP and we
30 start them by sending them the signal GO_SIG_START. */
32 #define GO_SIG_START (SIGRTMIN + 1)
33 #define GO_SIG_STOP (SIGRTMIN + 2)
39 /* A doubly linked list of the threads we have started.  */
/* NOTE(review): this excerpt is an elided listing -- the
   `struct __go_thread_id {' opener and several members are not visible.
   Code later in this file dereferences ->id, ->tentative, ->m, ->pfn
   and ->arg, so the full struct must declare those fields as well --
   TODO confirm against the complete source.  */
44 struct __go_thread_id *prev;
45 struct __go_thread_id *next;
46 /* True if the thread ID has not yet been filled in.  */
50 /* Thread's M structure.  */
52 /* If the thread ID has not been filled in, the function we are
55 /* If the thread ID has not been filled in, the argument to the
/* Head of the all-threads list; every traversal below happens with
   __go_thread_ids_lock held.  */
60 static struct __go_thread_id *__go_all_thread_ids;
62 /* A lock to control access to ALL_THREAD_IDS.  */
64 static pthread_mutex_t __go_thread_ids_lock = PTHREAD_MUTEX_INITIALIZER;
66 /* A semaphore used to wait until all the threads have stopped.  */
/* Posted once per thread on the GO_SIG_STOP path; runtime_stoptheworld
   waits on it once per stopped thread.  */
68 static sem_t __go_thread_ready_sem;
70 /* A signal set used to wait until garbage collection is complete.  */
/* Built in initialization: GO_SIG_START unblocked, GO_SIG_STOP blocked;
   passed to sigsuspend while waiting for the GC to finish.  */
72 static sigset_t __go_thread_wait_sigset;
74 /* Remove the current thread from the list of threads.  */
/* NOTE(review): pthread cleanup handler, registered via
   pthread_cleanup_push in start_go_thread.  The return type, braces and
   some declarations are elided in this listing; `m' and `mcache' are
   presumably per-thread globals declared outside this excerpt -- TODO
   confirm.  */
77 remove_current_thread (void *dummy __attribute__ ((unused)))
79 struct __go_thread_id *list_entry;
83 list_entry = m->list_entry;
86 i = pthread_mutex_lock (&__go_thread_ids_lock);
/* Unlink this entry from the doubly linked thread list; when it is the
   head, advance __go_all_thread_ids instead of patching a predecessor.  */
89 if (list_entry->prev != NULL)
90 list_entry->prev->next = list_entry->next;
92 __go_all_thread_ids = list_entry->next;
93 if (list_entry->next != NULL)
94 list_entry->next->prev = list_entry->prev;
96 /* This will lock runtime_mheap as needed.  */
97 runtime_MCache_ReleaseAll (mcache);
99 /* This should never deadlock--there shouldn't be any code that
100 holds the runtime_mheap lock when locking __go_thread_ids_lock.
101 We don't want to do this after releasing __go_thread_ids_lock
102 because it will mean that the garbage collector might run, and
103 the garbage collector does not try to lock runtime_mheap in all
104 cases since it knows it is running single-threaded.  */
/* Fold this thread's cache statistics into the global mstats, then
   return the zeroed MCache to the fixed-size allocator.  */
105 runtime_lock (&runtime_mheap);
106 mstats.heap_alloc += mcache->local_alloc;
107 mstats.heap_objects += mcache->local_objects;
108 __builtin_memset (mcache, 0, sizeof (struct MCache));
109 runtime_FixAlloc_Free (&runtime_mheap.cachealloc, mcache);
110 runtime_unlock (&runtime_mheap);
112 /* As soon as we release this lock, a GC could run.  Since this
113 thread is no longer on the list, the GC will not find our M
114 structure, so it could get freed at any time.  That means that
115 any code from here to thread exit must not assume that m is
119 i = pthread_mutex_unlock (&__go_thread_ids_lock);
120 __go_assert (i == 0);
125 /* Start the thread.  */
/* Thread entry point handed to pthread_create in __go_go; THREAD_ARG is
   the new thread's M structure.  Several lines (return type, braces,
   the actual call of pfn(arg)) are elided in this listing.  */
128 start_go_thread (void *thread_arg)
130 struct M *newm = (struct M *) thread_arg;
131 void (*pfn) (void *);
133 struct __go_thread_id *list_entry;
/* NOTE(review): RTEMS-specific registration of per-task variables;
   presumably guarded by a platform #ifdef in the full source -- TODO
   confirm.  */
137 __wrap_rtems_task_variable_add ((void **) &m);
138 __wrap_rtems_task_variable_add ((void **) &__go_panic_defer);
/* Ensure remove_current_thread runs on any exit path, including
   pthread cancellation (used by Goexit).  */
143 pthread_cleanup_push (remove_current_thread, NULL);
145 list_entry = newm->list_entry;
147 pfn = list_entry->pfn;
148 arg = list_entry->arg;
150 #ifndef USING_SPLIT_STACK
151 /* If we don't support split stack, record the current stack as the
152 top of the stack.  There shouldn't be anything relevant to the
153 garbage collector above this point.  */
154 m->gc_sp = (void *) &arg;
157 /* Finish up the entry on the thread list.  */
159 i = pthread_mutex_lock (&__go_thread_ids_lock);
160 __go_assert (i == 0);
/* Fill in the real thread id and clear the tentative flag so the GC
   will now stop and scan this thread; pfn/arg are cleared because the
   entry no longer owns them (they were copied to locals above).  */
162 list_entry->id = pthread_self ();
163 list_entry->pfn = NULL;
164 list_entry->arg = NULL;
165 list_entry->tentative = 0;
167 i = pthread_mutex_unlock (&__go_thread_ids_lock);
168 __go_assert (i == 0);
/* Pop and execute (argument 1) the cleanup handler on normal return.  */
172 pthread_cleanup_pop (1);
177 /* The runtime.Goexit function.  */
179 void Goexit (void) asm ("libgo_runtime.runtime.Goexit");
188 /* Count of threads created.  */
/* Incremented atomically below to give each M a unique id.  */
190 static volatile int mcount;
192 /* Implement the go statement.  */
/* Creates a new M and a tentative thread-list entry, publishes the
   entry, then starts a detached pthread running start_go_thread.
   NOTE(review): the return type line and declarations of i, attr, tid
   and newm are elided in this listing.  */
195 __go_go (void (*pfn) (void*), void *arg)
200 struct __go_thread_id *list_entry;
203 i = pthread_attr_init (&attr);
204 __go_assert (i == 0);
/* Detached: nothing ever joins a goroutine thread.  */
205 i = pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
206 __go_assert (i == 0);
208 #ifdef LINKER_SUPPORTS_SPLIT_STACK
209 /* The linker knows how to handle calls between code which uses
210 -fsplit-stack and code which does not.  That means that we can
211 run with a smaller stack and rely on the -fsplit-stack support to
212 save us.  The GNU/Linux glibc library won't let us have a very
213 small stack, but we make it as small as we can.  */
214 #ifndef PTHREAD_STACK_MIN
215 #define PTHREAD_STACK_MIN 8192
217 i = pthread_attr_setstacksize (&attr, PTHREAD_STACK_MIN);
218 __go_assert (i == 0);
221 newm = __go_alloc (sizeof (M));
/* NOTE(review): malloc result is used without a NULL check; an
   allocation failure would crash on the next line rather than report
   out-of-memory.  */
223 list_entry = malloc (sizeof (struct __go_thread_id));
224 list_entry->prev = NULL;
225 list_entry->next = NULL;
/* Tentative: GC stop/scan code must skip this entry until
   start_go_thread fills in the real thread id.  */
226 list_entry->tentative = 1;
227 list_entry->m = newm;
228 list_entry->pfn = pfn;
229 list_entry->arg = arg;
231 newm->list_entry = list_entry;
/* Unique id and a per-thread PRNG seed derived from it.  */
233 newm->id = __sync_fetch_and_add (&mcount, 1);
234 newm->fastrand = 0x49f6428aUL + newm->id;
236 newm->mcache = runtime_allocmcache ();
238 /* Add the thread to the list of all threads, marked as tentative
239 since it is not yet ready to go.  */
240 i = pthread_mutex_lock (&__go_thread_ids_lock);
241 __go_assert (i == 0);
/* Push onto the head of the list.  */
243 if (__go_all_thread_ids != NULL)
244 __go_all_thread_ids->prev = list_entry;
245 list_entry->next = __go_all_thread_ids;
246 __go_all_thread_ids = list_entry;
248 i = pthread_mutex_unlock (&__go_thread_ids_lock);
249 __go_assert (i == 0);
251 /* Start the thread.  */
252 i = pthread_create (&tid, &attr, start_go_thread, newm);
253 __go_assert (i == 0);
255 i = pthread_attr_destroy (&attr);
256 __go_assert (i == 0);
259 /* This is the signal handler for GO_SIG_START.  The garbage collector
260 will send this signal to a thread when it wants the thread to
261 start.  We don't have to actually do anything here, but we need a
262 signal handler since ignoring the signal will mean that the
263 sigsuspend will never see it.  */
/* Intentionally empty handler: delivery alone is what ends the
   sigsuspend in the stop-for-GC path.  (Return type and empty body are
   elided in this listing.)  */
266 gc_start_handler (int sig __attribute__ ((unused)))
270 /* Tell the garbage collector that we are ready, and wait for the
271 garbage collector to tell us that it is done.  This may be called
272 by a signal handler, so it is restricted to using functions which
273 are async cancel safe.  */
/* NOTE(review): the function name line is elided in this listing;
   presumably a static helper shared by gc_stop_handler and
   __go_run_goroutine_gc -- TODO confirm against the full source.  */
280 /* Tell the garbage collector about our stack.  */
281 #ifdef USING_SPLIT_STACK
282 m->gc_sp = __splitstack_find (NULL, NULL, &m->gc_len,
283 &m->gc_next_segment, &m->gc_next_sp,
/* Without split stacks, publish the live region between the recorded
   stack top (m->gc_sp) and the current stack position, handling both
   stack growth directions.  */
287 uintptr_t top = (uintptr_t) m->gc_sp;
/* NOTE(review): the `⊤' token below looks like an encoding mangling
   of `&top;' (address of the local above) -- TODO restore when editing
   the real file.  */
288 uintptr_t bottom = (uintptr_t) ⊤
291 m->gc_next_sp = m->gc_sp;
292 m->gc_len = bottom - top;
296 m->gc_next_sp = (void *) bottom;
297 m->gc_len = top - bottom;
302 /* FIXME: Perhaps we should just move __go_panic_defer into M.  */
303 m->gc_panic_defer = __go_panic_defer;
305 /* Tell the garbage collector that we are ready by posting to the
/* sem_post is async-signal-safe, so this is legal from a signal
   handler.  */
307 i = sem_post (&__go_thread_ready_sem);
308 __go_assert (i == 0);
310 /* Wait for the garbage collector to tell us to continue.  */
/* Atomically installs __go_thread_wait_sigset (GO_SIG_START unblocked)
   and waits; returning from gc_start_handler ends the suspension.  */
311 sigsuspend (&__go_thread_wait_sigset);
314 /* This is the signal handler for GO_SIG_STOP.  The garbage collector
315 will send this signal to a thread when it wants the thread to
319 gc_stop_handler (int sig __attribute__ ((unused)))
/* NOTE(review): `pm' is presumably a local copy of this thread's M
   pointer taken in elided lines above -- TODO confirm.  The
   compare-and-swap with old == new == 1 is used purely as an atomic
   read of each flag.  */
323 if (__sync_bool_compare_and_swap (&pm->holds_finlock, 1, 1))
325 /* We can't interrupt the thread while it holds the finalizer
326 lock.  Otherwise we can get into a deadlock when mark calls
327 runtime_walkfintab.  */
/* Ask the thread to stop itself once it releases the lock.  */
328 __sync_bool_compare_and_swap (&pm->gcing_for_finlock, 0, 1);
332 if (__sync_bool_compare_and_swap (&pm->mallocing, 1, 1))
334 /* m->mallocing was already non-zero.  We can't interrupt the
335 thread while it is running an malloc.  Instead, tell it to
336 call back to us when done.  */
337 __sync_bool_compare_and_swap (&pm->gcing, 0, 1);
341 if (__sync_bool_compare_and_swap (&pm->nomemprof, 1, 1))
343 /* Similarly, we can't interrupt the thread while it is building
344 profiling information.  Otherwise we can get into a deadlock
345 when sweepspan calls MProf_Free.  */
346 __sync_bool_compare_and_swap (&pm->gcing_for_prof, 0, 1);
353 /* This is called by malloc when it gets a signal during the malloc
/* NOTE(review): the rest of this header comment and the return type
   line are elided in this listing.  */
357 __go_run_goroutine_gc (int r)
359 /* Force callee-saved registers to be saved on the stack.  This is
360 not needed if we are invoked from the signal handler, but it is
361 needed if we are called directly, since otherwise we might miss
362 something that a function somewhere up the call stack is holding
364 __builtin_unwind_init ();
368 /* This avoids tail recursion, to make sure that the saved registers
373 /* Stop all the other threads for garbage collection.  */
/* Takes __go_thread_ids_lock and deliberately returns WITHOUT releasing
   it (see the final comment); runtime_starttheworld unlocks it.  */
376 runtime_stoptheworld (void)
381 struct __go_thread_id *p;
383 i = pthread_mutex_lock (&__go_thread_ids_lock);
384 __go_assert (i == 0);
386 me = pthread_self ();
388 p = __go_all_thread_ids;
/* Skip ourselves and entries whose thread id is not yet valid.  */
391 if (p->tentative || pthread_equal (me, p->id))
395 i = pthread_kill (p->id, GO_SIG_STOP);
/* Failure branch of pthread_kill (presumably ESRCH, several lines
   elided here): the thread died, so unlink its entry from the list --
   TODO confirm against the full source.  */
403 struct __go_thread_id *next;
405 /* This thread died somehow.  Remove it from the
409 p->prev->next = next;
411 __go_all_thread_ids = next;
413 next->prev = p->prev;
422 /* Wait for each thread to receive the signal and post to the
423 semaphore.  If a thread receives the signal but contrives to die
424 before it posts to the semaphore, then we will hang forever
/* Retry sem_wait when it is interrupted by a signal.  */
429 i = sem_wait (&__go_thread_ready_sem);
430 if (i < 0 && errno == EINTR)
432 __go_assert (i == 0);
436 /* The gc_panic_defer field should now be set for all M's except the
437 one in this thread.  Set this one now.  */
438 m->gc_panic_defer = __go_panic_defer;
440 /* Leave with __go_thread_ids_lock held.  */
443 /* Scan all the stacks for garbage collection.  This should be called
444 with __go_thread_ids_lock held.  */
447 __go_scanstacks (void (*scan) (byte *, int64))
450 struct __go_thread_id *p;
452 /* Make sure all the registers for this thread are on the stack.  */
453 __builtin_unwind_init ();
455 me = pthread_self ();
456 for (p = __go_all_thread_ids; p != NULL; p = p->next)
460 /* The goroutine function and argument can be allocated on
461 the heap, so we have to scan them for a thread that has
463 scan ((void *) &p->pfn, sizeof (void *));
464 scan ((void *) &p->arg, sizeof (void *));
465 scan ((void *) &p->m, sizeof (void *));
469 #ifdef USING_SPLIT_STACK
/* For the current thread, walk the live split-stack segments starting
   from here; for a stopped thread, resume from the state it recorded
   when it handled GO_SIG_STOP.  */
477 if (pthread_equal (me, p->id))
482 sp = __splitstack_find (NULL, NULL, &len, &next_segment,
483 &next_sp, &initial_sp);
489 next_segment = p->m->gc_next_segment;
490 next_sp = p->m->gc_next_sp;
491 initial_sp = p->m->gc_initial_sp;
497 sp = __splitstack_find (next_segment, next_sp, &len,
498 &next_segment, &next_sp, &initial_sp);
501 #else /* !defined(USING_SPLIT_STACK) */
503 if (pthread_equal (me, p->id))
505 uintptr_t top = (uintptr_t) m->gc_sp;
/* NOTE(review): the `⊤' token below appears to be an encoding mangling
   of `&top;' (address of the local above) -- TODO restore when editing
   the real file.  */
506 uintptr_t bottom = (uintptr_t) ⊤
/* Scan between the recorded stack top and the current position,
   whichever direction the stack grows.  */
508 scan (m->gc_sp, bottom - top);
510 scan ((void *) bottom, top - bottom);
514 scan (p->m->gc_next_sp, p->m->gc_len);
517 #endif /* !defined(USING_SPLIT_STACK) */
519 /* Also scan the M structure while we're at it.  */
/* NOTE(review): despite the comment, this call scans only the pointer
   slot (sizeof (void *)), not the M structure it points to -- verify
   intent against the full source.  */
521 scan ((void *) &p->m, sizeof (void *));
525 /* Release all the memory caches.  This is called with
526 __go_thread_ids_lock held.  */
529 __go_stealcache (void)
531 struct __go_thread_id *p;
/* Return every thread's MCache contents to the central heap.  */
533 for (p = __go_all_thread_ids; p != NULL; p = p->next)
534 runtime_MCache_ReleaseAll (p->m->mcache);
537 /* Gather memory cache statistics.  This is called with
538 __go_thread_ids_lock held.  */
541 __go_cachestats (void)
543 struct __go_thread_id *p;
545 for (p = __go_all_thread_ids; p != NULL; p = p->next)
/* NOTE(review): `c' is presumably p->m->mcache, assigned in an elided
   line -- TODO confirm against the full source.  */
550 runtime_purgecachedstats(p->m);
/* Fold per-size-class malloc/free counts into the global mstats and
   reset the thread-local counters.  */
552 for (i = 0; i < NumSizeClasses; ++i)
554 mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
555 c->local_by_size[i].nmalloc = 0;
556 mstats.by_size[i].nfree += c->local_by_size[i].nfree;
557 c->local_by_size[i].nfree = 0;
562 /* Start the other threads after garbage collection.  */
565 runtime_starttheworld (bool extra __attribute__ ((unused)))
569 struct __go_thread_id *p;
571 /* Here __go_thread_ids_lock should be held.  */
573 me = pthread_self ();
574 p = __go_all_thread_ids;
/* Wake every stopped thread; skip ourselves and tentative entries,
   mirroring the filter used in runtime_stoptheworld.  */
577 if (p->tentative || pthread_equal (me, p->id))
581 i = pthread_kill (p->id, GO_SIG_START);
/* Release the lock that runtime_stoptheworld deliberately left held.  */
589 i = pthread_mutex_unlock (&__go_thread_ids_lock);
590 __go_assert (i == 0);
593 /* Initialize the interaction between goroutines and the garbage
597 __go_gc_goroutine_init (void *sp __attribute__ ((unused)))
599 struct __go_thread_id *list_entry;
602 struct sigaction act;
604 /* Add the initial thread to the list of all threads. */
606 list_entry = malloc (sizeof (struct __go_thread_id));
607 list_entry->prev = NULL;
608 list_entry->next = NULL;
609 list_entry->tentative = 0;
610 list_entry->id = pthread_self ();
612 list_entry->pfn = NULL;
613 list_entry->arg = NULL;
614 __go_all_thread_ids = list_entry;
616 /* Initialize the semaphore which signals when threads are ready for
619 i = sem_init (&__go_thread_ready_sem, 0, 0);
620 __go_assert (i == 0);
622 /* Fetch the current signal mask. */
624 i = sigemptyset (&sset);
625 __go_assert (i == 0);
626 i = sigprocmask (SIG_BLOCK, NULL, &sset);
627 __go_assert (i == 0);
629 /* Make sure that GO_SIG_START is not blocked and GO_SIG_STOP is
630 blocked, and save that set for use with later calls to sigsuspend
631 while waiting for GC to complete. */
633 i = sigdelset (&sset, GO_SIG_START);
634 __go_assert (i == 0);
635 i = sigaddset (&sset, GO_SIG_STOP);
636 __go_assert (i == 0);
637 __go_thread_wait_sigset = sset;
639 /* Block SIG_SET_START and unblock SIG_SET_STOP, and use that for
640 the process signal mask. */
642 i = sigaddset (&sset, GO_SIG_START);
643 __go_assert (i == 0);
644 i = sigdelset (&sset, GO_SIG_STOP);
645 __go_assert (i == 0);
646 i = sigprocmask (SIG_SETMASK, &sset, NULL);
647 __go_assert (i == 0);
649 /* Install the signal handlers. */
650 memset (&act, 0, sizeof act);
651 i = sigemptyset (&act.sa_mask);
652 __go_assert (i == 0);
654 act.sa_handler = gc_start_handler;
655 act.sa_flags = SA_RESTART;
656 i = sigaction (GO_SIG_START, &act, NULL);
657 __go_assert (i == 0);
659 /* We could consider using an alternate signal stack for this. The
660 function does not use much stack space, so it may be OK. */
661 act.sa_handler = gc_stop_handler;
662 i = sigaction (GO_SIG_STOP, &act, NULL);
663 __go_assert (i == 0);
665 #ifndef USING_SPLIT_STACK
666 /* If we don't support split stack, record the current stack as the