1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
12 #include <sys/types.h>
17 #include <semaphore.h>
20 #ifdef HAVE_SYS_MMAN_H
24 #include "interface.h"
/* Two-level stringification: _STRINGIFY2_ turns its argument into a
   string literal; _STRINGIFY_ macro-expands the argument first so that
   macros such as __USER_LABEL_PREFIX__ are stringified by value.  */
#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
/* The prefix (typically "" or "_") the assembler puts on user-level
   symbols; used below when binding C functions to Go symbol names
   via __asm__.  */
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
/* This file supports C files copied from the 6g runtime library.
   This is a version of the 6g runtime.h rewritten for gccgo's version
   of the C compiler.  */
/* Fixed-width scalar types, pinned with GCC machine-mode attributes so
   they have the intended size on every target: QI/HI/SI/DI are
   1/2/4/8 bytes, SF/DF are single/double precision, and "pointer"
   is the target word size.  */
typedef signed int int8 __attribute__ ((mode (QI)));
typedef unsigned int uint8 __attribute__ ((mode (QI)));
typedef signed int int16 __attribute__ ((mode (HI)));
typedef unsigned int uint16 __attribute__ ((mode (HI)));
typedef signed int int32 __attribute__ ((mode (SI)));
typedef unsigned int uint32 __attribute__ ((mode (SI)));
typedef signed int int64 __attribute__ ((mode (DI)));
typedef unsigned int uint64 __attribute__ ((mode (DI)));
typedef float float32 __attribute__ ((mode (SF)));
typedef double float64 __attribute__ ((mode (DF)));
typedef signed int intptr __attribute__ ((mode (pointer)));
typedef unsigned int uintptr __attribute__ ((mode (pointer)));
/* Go's "int" and "uint" are pointer-sized.  */
typedef intptr intgo; // Go's int
typedef uintptr uintgo; // Go's uint
/* Opaque forward declarations for runtime-internal structures; the
   full definitions appear later in this header or in other runtime
   source files.  */
typedef struct Func Func;
typedef struct Lock Lock;
typedef struct Note Note;
typedef struct String String;
typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
typedef struct MCache MCache;
typedef struct FixAlloc FixAlloc;
typedef struct Hchan Hchan;
typedef struct Timers Timers;
typedef struct Timer Timer;
typedef struct GCStats GCStats;
typedef struct LFNode LFNode;
typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
typedef struct CgoMal CgoMal;
typedef struct PollDesc PollDesc;
/* Short runtime-style aliases for structures declared by gccgo's
   compiler-interface headers (the __go_* names).  */
typedef struct __go_open_array Slice;
typedef struct __go_interface Iface;
typedef struct __go_empty_interface Eface;
typedef struct __go_type_descriptor Type;
typedef struct __go_defer_stack Defer;
typedef struct __go_panic_stack Panic;
typedef struct __go_ptr_type PtrType;
typedef struct __go_func_type FuncType;
typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType;
typedef struct Traceback Traceback;
/* A location in the program, used for backtraces.  */
typedef struct Location Location;
/*
 * Per-CPU declaration: accessors for the calling thread's scheduler
 * state.  NOTE(review): presumably return the current M and G via
 * thread-local storage -- the definitions are not visible in this
 * file; confirm against the defining translation unit.
 */
extern M* runtime_m(void);
extern G* runtime_g(void);
108 // If you add to this list, add to the list
109 // of "okay during garbage collection" status
116 Gmoribund_unused, // currently unused, but hardcoded in gdb scripts
135 PtrSize = sizeof(void*),
139 // Per-M stack segment cache size.
141 // Global <-> per-M stack segment cache transfer batch size.
142 StackCacheBatch = 16,
149 // Futex-based impl treats it as uint32 key,
150 // while sema-based impl as M* waitm.
151 // Used to be a union, but unions break precise GC.
156 // Futex-based impl treats it as uint32 key,
157 // while sema-based impl as M* waitm.
158 // Used to be a union, but unions break precise GC.
169 // variable-size, fn-specific data here
173 // the struct must consist of only uint64's,
// because it is cast to uint64[].
182 // A location in the program, used for backtraces.
193 void* closure; // Closure value.
196 void* exception; // current exception being thrown
197 bool is_foreign; // whether current exception from other language
198 void *gcstack; // if status==Gsyscall, gcstack = stackbase to use during gc
199 uintptr gcstack_size;
200 void* gcnext_segment;
204 byte* entry; // initial function
205 G* alllink; // on allg
206 void* param; // passed parameter on wakeup
207 bool fromgogo; // reached from gogo
210 uint32 selgen; // valid sudog pointer
211 const char* waitreason; // if status==Gwaiting
214 bool issystem; // do not output in stack dump
215 bool isbackground; // ignore in deadlock detector
216 bool blockingsyscall; // hint that the next syscall will block
217 M* m; // for debuggers, but offset not hard-coded
222 // DeferChunk *dchunk;
223 // DeferChunk *dchunknext;
227 uintptr gopc; // pc of go statement that created this goroutine
232 Traceback* traceback;
235 void* stack_context[10];
240 G* g0; // goroutine with scheduling stack
241 G* gsignal; // signal-handling G
243 size_t gsignalstacksize;
244 void (*mstartfn)(void);
245 G* curg; // current running goroutine
246 P* p; // attached P for executing Go code (nil if not executing Go code)
257 bool blockingsyscall;
260 uint64 ncgocall; // number of cgo calls in total
261 int32 ncgo; // number of cgo calls currently in progress
264 M* alllink; // on allm
268 Location createstack[32]; // Stack that created this thread.
269 uint32 locked; // tracking for LockOSThread
270 M* nextwaitm; // next M waiting for lock
271 uintptr waitsema; // semaphore for parking on locks
272 uint32 waitsemacount;
277 bool dropextram; // for gccgo: drop after call is done.
279 void (*waitunlockf)(Lock*);
282 uintptr settype_buf[1024];
283 uintptr settype_bufsize;
292 uint32 status; // one of Pidle/Prunning/...
294 uint32 tick; // incremented on every scheduler or system call
295 M* m; // back-link to associated M (nil if idle)
298 // Queue of runnable goroutines.
304 // Available G's (status == Gdead)
311 // The m->locked word holds a single bit saying whether
312 // external calls to LockOSThread are in effect, and then a counter
313 // of the internal nesting depth of lockOSThread / unlockOSThread.
327 SigNotify = 1<<0, // let signal.Notify have signal, even if from kernel
328 SigKill = 1<<1, // if signal.Notify doesn't take it, exit quietly
329 SigThrow = 1<<2, // if signal.Notify doesn't take it, exit loudly
330 SigPanic = 1<<3, // if the signal is from the kernel, panic
331 SigDefault = 1<<4, // if the signal isn't explicitly requested, don't monitor it
332 SigHandling = 1<<5, // our signal handler is registered
333 SigIgnored = 1<<6, // the signal was ignored before we registered for it
340 // NOTE(rsc): keep in sync with extern.go:/type.Func.
341 // Eventually, the loaded symbol table should be closer to this form.
345 uintptr entry; // entry pc
371 // Package time knows the layout of this structure.
372 // If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
375 int32 i; // heap index
377 // Timer wakes up at when, and then at when+period, ... (period > 0 only)
378 // each time calling f(now, arg) in the timer goroutine, so f must be
379 // a well-behaved function and not block.
386 // Lock-free stack node.
393 // Parallel for descriptor.
396 void (*body)(ParFor*, uint32); // executed for each element
397 uint32 done; // number of idle threads
398 uint32 nthr; // total number of threads
399 uint32 nthrmax; // maximum number of threads
400 uint32 thrseq; // thread id sequencer
401 uint32 cnt; // iteration space [0, cnt)
402 void *ctx; // arbitrary user context
403 bool wait; // if true, wait while all threads finish processing,
404 // otherwise parfor may return while other threads are still working
405 ParForThread *thr; // array of thread descriptors
406 uint32 pad; // to align ParForThread.pos for 64-bit atomic operations
415 // Track memory allocated by code not written in Go during a cgo call,
416 // so that the garbage collector can see them.
425 * you need super-gopher-guru privilege
/* Number of elements in a statically-declared array (NOT a pointer:
   sizeof on a decayed parameter would give the wrong answer).  */
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
/* The runtime's spelling of the null pointer constant.  */
#define nil ((void*)0)
/* Mark a value as deliberately unused, silencing compiler warnings.
   The argument is parenthesized so expressions containing
   low-precedence operators (comma, assignment) expand safely.  */
#define USED(v) ((void)(v))
#define ROUND(x, n) (((x)+(n)-1)&~((n)-1)) /* all-caps to mark as macro: it evaluates n twice; n must be a power of two */
/* Random bytes handed to us by the OS at startup, used to seed hash
   functions.  NOTE(review): these two are declared without "extern",
   relying on tentative-definition/common linkage; verify against the
   defining file before changing.  */
byte* runtime_startup_random_data;
uint32 runtime_startup_random_data_len;
/* Copy out a (pointer, length) view of the startup random data.  */
void runtime_get_random_data(byte**, int32*);
// hashinit wants this many random bytes
void runtime_hashinit(void);
/* NOTE(review): empty () declares an unprototyped function (obsolescent
   C); (void) would be stricter if no caller passes arguments -- verify
   call sites before tightening.  */
void runtime_traceback();
void runtime_tracebackothers(G*);
/*
 * Global runtime state shared by the scheduler, GC and signal code.
 * Defined in other runtime source files.
 */
extern uintptr runtime_zerobase;
extern G* runtime_allg;
extern G* runtime_lastg;
extern M* runtime_allm;
extern P** runtime_allp;
extern int32 runtime_gomaxprocs;
extern uint32 runtime_needextram;
extern bool runtime_singleproc;
extern uint32 runtime_panicking;
extern uint32 runtime_gcwaiting; // gc is waiting to run
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
/*
 * common functions and data
 */
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
/* Presumably returns the offset of the first NUL byte (a strlen for
   byte*); definition is elsewhere -- confirm.  */
intgo runtime_findnull(const byte*);
void runtime_dump(byte*, int32);
/*
 * very low level c-called
 */
struct __go_func_type;
void runtime_args(int32, byte**);
void runtime_osinit();
void runtime_goargs(void);
void runtime_goenvs(void);
void runtime_goenvs_unix(void);
/* Fatal runtime error; marked noreturn, so callers may rely on it
   never coming back.  */
void runtime_throw(const char*) __attribute__ ((noreturn));
/* Raise a run-time panic carrying the given C string; noreturn.  */
void runtime_panicstring(const char*) __attribute__ ((noreturn));
void runtime_prints(const char*);
void runtime_printf(const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void* runtime_mal(uintptr);
String runtime_gostring(const byte*);
String runtime_gostringnocopy(const byte*);
void runtime_schedinit(void);
void runtime_initsig(void);
void runtime_sigenable(uint32 sig);
void runtime_sigdisable(uint32 sig);
int32 runtime_gotraceback(bool *crash);
void runtime_goroutineheader(G*);
void runtime_goroutinetrailer(G*);
void runtime_printtrace(Location*, int32, bool);
/* Thin wrappers over libc syscalls and GCC atomic builtins.  Every
   macro argument is parenthesized so expansions are immune to
   operator-precedence surprises at the call site.  */
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
#define runtime_close(d) close(d)
/* Compare-and-swap; nonzero iff *pval was "old" and is now "new".  */
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap((pval), (old), (new))
/* 64-bit CAS; on failure *pold is updated with the current value.  */
#define runtime_cas64(pval, pold, new) __atomic_compare_exchange_n((pval), (pold), (new), 0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap((pval), (old), (new))
// Don't confuse with XADD x86 instruction,
// this one is actually 'addx', that is, add-and-fetch.
#define runtime_xadd(p, v) __sync_add_and_fetch((p), (v))
#define runtime_xadd64(p, v) __sync_add_and_fetch((p), (v))
#define runtime_xchg(p, v) __atomic_exchange_n((p), (v), __ATOMIC_SEQ_CST)
#define runtime_xchg64(p, v) __atomic_exchange_n((p), (v), __ATOMIC_SEQ_CST)
#define runtime_atomicload(p) __atomic_load_n((p), __ATOMIC_SEQ_CST)
#define runtime_atomicstore(p, v) __atomic_store_n((p), (v), __ATOMIC_SEQ_CST)
#define runtime_atomicstore64(p, v) __atomic_store_n((p), (v), __ATOMIC_SEQ_CST)
#define runtime_atomicload64(p) __atomic_load_n((p), __ATOMIC_SEQ_CST)
#define runtime_atomicloadp(p) __atomic_load_n((p), __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n((p), (v), __ATOMIC_SEQ_CST)
/*
 * Scheduler, memory-allocator and thread (M) management entry points.
 */
void runtime_ready(G*);
const byte* runtime_getenv(const char*);
int32 runtime_atoi(const byte*);
void* runtime_mstart(void*);
G* runtime_malg(int32, byte**, size_t*);
void runtime_mpreinit(M*);
void runtime_minit(void);
void runtime_unminit(void);
void runtime_needm(void);
void runtime_dropm(void);
void runtime_signalstack(byte*, int32);
MCache* runtime_allocmcache(void);
void runtime_freemcache(MCache*);
void runtime_mallocinit(void);
void runtime_mprofinit(void);
/* Allocation routed through gccgo's allocator.  */
#define runtime_malloc(s) __go_alloc(s)
#define runtime_free(p) __go_free(p)
bool runtime_addfinalizer(void*, FuncVal *fn, const struct __go_func_type *);
/* NOTE(review): expands to the caller's frame address regardless of p;
   the argument is ignored -- confirm this matches callers' intent.  */
#define runtime_getcallersp(p) __builtin_frame_address(1)
int32 runtime_mcount(void);
int32 runtime_gcount(void);
uint32 runtime_fastrand1(void);
void runtime_setmg(M*, G*);
void runtime_newextram(void);
#define runtime_exit(s) exit(s)
#define runtime_breakpoint() __builtin_trap()
void runtime_gosched(void);
void runtime_park(void(*)(Lock*), Lock*, const char*);
void runtime_tsleep(int64, const char*);
M* runtime_newm(void);
void runtime_goexit(void);
/* Bound to Go symbols in package syscall via GOSYM_PREFIX.  */
void runtime_entersyscall(void) __asm__ (GOSYM_PREFIX "syscall.Entersyscall");
void runtime_entersyscallblock(void);
void runtime_exitsyscall(void) __asm__ (GOSYM_PREFIX "syscall.Exitsyscall");
/* Start a new goroutine running pfn(arg); gccgo's "go" statement.  */
G* __go_go(void (*pfn)(void*), void*);
/*
 * Signals, tracebacks, profiling, timers, the network poller, and
 * stop/start-the-world control.
 */
bool __go_sigsend(int32 sig);
int32 runtime_callers(int32, Location*, int32);
int64 runtime_nanotime(void);
/* noreturn: unwinds into the crash path.  */
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void);
void runtime_sigprof();
void runtime_resetcpuprofiler(int32);
void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
void runtime_usleep(uint32);
int64 runtime_cputicks(void);
int64 runtime_tickspersecond(void);
void runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
void runtime_addtimer(Timer*);
bool runtime_deltimer(Timer*);
G* runtime_netpoll(bool);
void runtime_netpollinit(void);
int32 runtime_netpollopen(int32, PollDesc*);
int32 runtime_netpollclose(int32);
void runtime_netpollready(G**, PollDesc*, int32);
void runtime_crash(void);
void runtime_stoptheworld(void);
void runtime_starttheworld(void);
extern uint32 runtime_worldsema;
/*
 * mutual exclusion locks.  in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Lock is unlocked (no need to initialize each lock).
 */
void runtime_lock(Lock*);
void runtime_unlock(Lock*);
/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return.  future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened.  if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 */
void runtime_noteclear(Note*);
void runtime_notesleep(Note*);
void runtime_notewakeup(Note*);
void runtime_notetsleep(Note*, int64);
/*
 * low-level synchronization for implementing the above.
 * NOTE(review): the sema* and futex* families are alternative
 * per-platform backends (see the Lock/Note comments about futex vs
 * sema implementations); the selecting #ifdefs are not visible in
 * this chunk.
 */
uintptr runtime_semacreate(void);
int32 runtime_semasleep(int64);
void runtime_semawakeup(M*);
void runtime_futexsleep(uint32*, uint32, int64);
void runtime_futexwakeup(uint32*, uint32);
/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */
void runtime_lfstackpush(uint64 *head, LFNode *node)
  __asm__ (GOSYM_PREFIX "runtime.lfstackpush");
LFNode* runtime_lfstackpop(uint64 *head);
/*
 * Parallel for over [0, n).
 * body() is executed for each iteration.
 * nthr - total number of worker threads.
 * ctx - arbitrary user context.
 * if wait=true, threads return from parfor() when all work is done;
 * otherwise, threads can return while other threads are still finishing processing.
 */
ParFor* runtime_parforalloc(uint32 nthrmax);
void runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32));
void runtime_parfordo(ParFor *desc) __asm__ (GOSYM_PREFIX "runtime.parfordo");
/* Memory-mapping and low-level helpers mapped straight to libc and
   GCC builtins.  */
#define runtime_mmap mmap
#define runtime_munmap munmap
#define runtime_madvise madvise
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
/* NOTE(review): the argument is ignored; always yields this frame's
   return address -- confirm callers expect that.  */
#define runtime_getcallerpc(p) __builtin_return_address(0)
/* RTEMS-specific hook.  NOTE(review): presumably only referenced when
   targeting RTEMS; the guarding #ifdef is not visible in this chunk.  */
void __wrap_rtems_task_variable_add(void **);
/*
 * Names generated by gccgo.
 * Map the runtime's print helpers onto the compiler-generated
 * __go_print_* entry points.
 */
#define runtime_printbool __go_print_bool
#define runtime_printfloat __go_print_double
#define runtime_printint __go_print_int64
#define runtime_printiface __go_print_interface
#define runtime_printeface __go_print_empty_interface
#define runtime_printstring __go_print_string
#define runtime_printpointer __go_print_pointer
#define runtime_printuint __go_print_uint64
#define runtime_printslice __go_print_slice
#define runtime_printcomplex __go_print_complex
/* Prototypes for the print helpers; where a macro above renames one
   of these, the declaration is of the __go_print_* symbol.  */
void runtime_printbool(_Bool);
void runtime_printbyte(int8);
void runtime_printfloat(double);
void runtime_printint(int64);
void runtime_printiface(Iface);
void runtime_printeface(Eface);
void runtime_printstring(String);
void runtime_printpc(void*);
void runtime_printpointer(void*);
void runtime_printuint(uint64);
void runtime_printhex(uint64);
void runtime_printslice(Slice);
void runtime_printcomplex(__complex double);
687 void reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool,
689 __asm__ (GOSYM_PREFIX "reflect.call");
#define runtime_panic __go_panic
/*
 * runtime c-called (but written in Go); bound to the Go symbols via
 * GOSYM_PREFIX.
 */
void runtime_printany(Eface)
     __asm__ (GOSYM_PREFIX "runtime.Printany");
void runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
     __asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError");
void runtime_newErrorString(String, Eface*)
     __asm__ (GOSYM_PREFIX "runtime.NewErrorString");
/*
 * wrapped for go users
 */
void runtime_semacquire(uint32 volatile *);
void runtime_semrelease(uint32 volatile *);
int32 runtime_gomaxprocsfunc(int32 n);
void runtime_procyield(uint32);
void runtime_osyield(void);
void runtime_lockOSThread(void);
void runtime_unlockOSThread(void);
bool runtime_showframe(String, bool);
uintptr runtime_memlimit(void);
// If appropriate, ask the operating system to control whether this
// thread should receive profiling signals.  This is only necessary on OS X.
// An operating system should not deliver a profiling signal to a
// thread that is not actually executing (what good is that?), but that's
// what OS X prefers to do.  When profiling is turned on, we mask
// away the profiling signal when threads go to sleep, so that OS X
// is forced to deliver the signal to a thread that's actually running.
// This is a no-op on other systems.
void runtime_setprof(bool);
#define ISNAN(f) __builtin_isnan(f)
#define runtime_setitimer setitimer
/* Runtime self-checks run at startup.  */
void runtime_check(void);
738 // A list of global variables that the garbage collector must scan.
740 struct root_list *next;
/* Register a compiler-generated list of global roots with the
   garbage collector.  */
void __go_register_gc_roots(struct root_list*);
// Size of stack space allocated using Go's allocator.
// This will be 0 when using split stacks, as in that case
// the stacks are allocated by the splitstack library.
extern uintptr runtime_stacks_sys;
/* PC symbolization.  NOTE(review): backtrace_state presumably comes
   from libbacktrace -- confirm against the build.  */
struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
extern _Bool __go_file_line(uintptr, String*, String*, intgo *);
extern byte* runtime_progname();
extern void runtime_main(void*);
int32 getproccount(void);
/* Advisory cache prefetch.  */
#define PREFETCH(p) __builtin_prefetch(p)
/* Closure-value slot used by gccgo's calling convention for Go
   function values.  NOTE(review): semantics inferred from the names;
   the definitions are elsewhere -- confirm.  */
void __go_set_closure(void*);
void* __go_get_closure(void);