// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#ifdef USING_SPLIT_STACK
extern void * __splitstack_find (void *, void *, size_t *, void **, void **,
extern void * __splitstack_find_context (void *context[10], size_t *, void **,
	DebugMark = 0,	// run second pass to check mark
	// Four bits per word (see #defines below).
	wordsPerBitmapWord = sizeof(void*)*8/4,
	bitShift = sizeof(void*)*8/4,
	IntermediateBufferCapacity = 64,
// Bits in per-word bitmap.
// #defines because enum might not be able to hold the values.
// Each word in the bitmap describes wordsPerBitmapWord words
// of heap memory. There are 4 bitmap bits dedicated to each heap word,
// so on a 64-bit system there is one bitmap word per 16 heap words.
// The bits in the word are packed together by type first, then by
// heap location, so each 64-bit bitmap word consists of, from top to bottom,
// the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
// then the 16 bitNoPointers/bitBlockBoundary bits, then the 16 bitAllocated bits.
// This layout makes it easier to iterate over the bits of a given type.
// The bitmap starts at mheap.arena_start and extends *backward* from
// there. On a 64-bit system the off'th word in the arena is tracked by
// the off/16+1'th word before mheap.arena_start. (On a 32-bit system,
// the only difference is that the divisor is 8.)
// To pull out the bits corresponding to a given pointer p, we use:
//	off = p - (uintptr*)mheap.arena_start; // word offset
//	b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
//	shift = off % wordsPerBitmapWord
//	bits = *b >> shift;
//	/* then test bits & bitAllocated, bits & bitMarked, etc. */
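//
// As a worked (purely illustrative) example: on a 64-bit system the heap word
// at word offset off=37 from mheap.arena_start is described by
//	b = (uintptr*)mheap.arena_start - 37/16 - 1   // = arena_start - 3 words
//	shift = 37 % 16                               // = 5
// so its four bits sit at bit positions 5, 21, 37, and 53 of *b, which are
// bitAllocated, bitNoPointers/bitBlockBoundary, bitMarked, and bitSpecial
// respectively (see the #defines below).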
#define bitAllocated		((uintptr)1<<(bitShift*0))
#define bitNoPointers		((uintptr)1<<(bitShift*1))	/* when bitAllocated is set */
#define bitMarked		((uintptr)1<<(bitShift*2))	/* when bitAllocated is set */
#define bitSpecial		((uintptr)1<<(bitShift*3))	/* when bitAllocated is set - has finalizer or being profiled */
#define bitBlockBoundary	((uintptr)1<<(bitShift*1))	/* when bitAllocated is NOT set */
#define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
// Holding worldsema grants an M the right to try to stop the world.
//	runtime_semacquire(&runtime_worldsema);
//	runtime_stoptheworld();
//	runtime_semrelease(&runtime_worldsema);
//	runtime_starttheworld();
uint32 runtime_worldsema = 1;
// The size of Workbuf is N*PageSize.
typedef struct Workbuf Workbuf;
#define SIZE (2*PageSize-sizeof(LFNode)-sizeof(uintptr))
	LFNode node; // must be first
	Obj obj[SIZE/sizeof(Obj) - 1];
	uint8 _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
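	// Sizing note on the two arrays above: obj[] plus _padding fill exactly SIZE
	// bytes, so (assuming the only other fields are the LFNode header and a
	// uintptr nobj count, which is what SIZE subtracts) the whole struct comes to
	// 2*PageSize, the multiple-of-PageSize size that scanblock checks for.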
typedef struct Finalizer Finalizer;
	const struct __go_func_type *ft;
typedef struct FinBlock FinBlock;
static FinBlock *finq; // list of finalizers that are to be executed
static FinBlock *finc; // cache of free blocks
static FinBlock *allfin; // list of all blocks
static int32 fingwait;
static void runfinq(void*);
static Workbuf* getempty(Workbuf*);
static Workbuf* getfull(Workbuf*);
static void putempty(Workbuf*);
static Workbuf* handoff(Workbuf*);
	uint64 full; // lock-free list of full blocks
	uint64 empty; // lock-free list of empty blocks
	byte pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
	volatile uint32 nwait;
	volatile uint32 ndone;
	volatile uint32 debugmarkdone;
	// TODO(atom): to be expanded in a later CL
	GC_DEFAULT_PTR = GC_NUM_INSTR,
// PtrTarget and BitTarget are structures used by intermediate buffers.
// The intermediate buffers hold GC data before it
// is moved/flushed to the work buffer (Workbuf).
// The size of an intermediate buffer is very small,
// such as 32 or 64 elements.
	uintptr *bitp, shift;
	struct PtrTarget ptrtarget[IntermediateBufferCapacity];
	struct BitTarget bittarget[IntermediateBufferCapacity];
	struct BufferList *next;
static struct BufferList *bufferList;
// flushptrbuf moves data from the PtrTarget buffer to the work buffer.
// The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
// while the work buffer contains blocks which have been marked
// and are prepared to be scanned by the garbage collector.
// _wp, _wbuf and _nobj are input/output parameters; they specify the work buffer.
// bitbuf holds temporary data generated by this function.
// A simplified drawing explaining how the todo-list moves from one structure to another:
//	Obj ------> PtrTarget (pointer targets)
//	            |
//	            | flushptrbuf (1st part,
//	            |   find block start)
//	            |
//	            `--------- BitTarget (pointer targets and the corresponding locations in bitmap)
//	                          |
//	                          | flushptrbuf (2nd part, mark and enqueue)
flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, struct BitTarget *bitbuf)
	byte *p, *arena_start, *obj;
	uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti;
	struct PtrTarget *ptrbuf_end;
	struct BitTarget *bitbufpos, *bt;
	arena_start = runtime_mheap.arena_start;
	ptrbuf_end = ptrbuf + n;
	// If buffer is nearly full, get a new one.
	if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);
		if(n >= nelem(wbuf->obj))
			runtime_throw("ptrbuf has to be smaller than Workbuf");
	// TODO(atom): This block is a branch of an if-then-else statement.
	//             The single-threaded branch may be added in a later CL.
		// Multi-threaded version.
		while(ptrbuf < ptrbuf_end) {
			// obj belongs to interval [mheap.arena_start, mheap.arena_used).
			if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
				runtime_throw("object is outside of mheap");
			// obj may be a pointer to a live object.
			// Try to find the beginning of the object.
			// Round down to word boundary.
			if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
				obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
			// Find bits for this word.
			off = (uintptr*)obj - (uintptr*)arena_start;
			bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
			shift = off % wordsPerBitmapWord;
			bits = xbits >> shift;
			// Pointing at the beginning of a block?
			if((bits & (bitAllocated|bitBlockBoundary)) != 0)
			// Pointing just past the beginning?
			// Scan backward a little to find a block boundary.
			for(j=shift; j-->0; ) {
				if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
					obj = (byte*)obj - (shift-j)*PtrSize;
			// Otherwise consult span table to find beginning.
			// (Manually inlined copy of MHeap_LookupMaybe.)
			k = (uintptr)obj>>PageShift;
			if(sizeof(void*) == 8)
				x -= (uintptr)arena_start>>PageShift;
			s = runtime_mheap.map[x];
			if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
			p = (byte*)((uintptr)s->start<<PageShift);
			if(s->sizeclass == 0) {
				if((byte*)obj >= (byte*)s->limit)
				int32 i = ((byte*)obj - p)/size;
			// Now that we know the object header, reload bits.
			off = (uintptr*)obj - (uintptr*)arena_start;
			bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
			shift = off % wordsPerBitmapWord;
			bits = xbits >> shift;
			// Now we have bits, bitp, and shift correct for
			// obj pointing at the base of the object.
			// Only care about allocated and not marked.
			if((bits & (bitAllocated|bitMarked)) != bitAllocated)
			*bitbufpos = (struct BitTarget){obj, ti, bitp, shift};
		for(bt=bitbuf; bt<bitbufpos; bt++){
			bits = xbits >> bt->shift;
			if((bits & bitMarked) != 0)
			*bt->bitp = xbits | (bitMarked << bt->shift);
			// If object has no pointers, don't need to scan further.
			if((bits & bitNoPointers) != 0)
			// Ask span about size class.
			// (Manually inlined copy of MHeap_Lookup.)
			x = (uintptr)obj >> PageShift;
			if(sizeof(void*) == 8)
				x -= (uintptr)arena_start>>PageShift;
			s = runtime_mheap.map[x];
			*wp = (Obj){obj, s->elemsize, bt->ti};
		runtime_unlock(&lock);
		// If another proc wants a pointer, give it some.
		if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
			wbuf = handoff(wbuf);
			wp = wbuf->obj + nobj;
// Program that scans the whole block and treats every block element as a potential pointer
static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
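// (Rough reading of the two words above: scanblock's GC_DEFAULT_PTR case walks
// the block one word at a time, via the stack_top.b += PtrSize step in scanblock,
// and treats every word that lands in [arena_start, arena_used) as a candidate
// pointer; the leading PtrSize word is presumably the per-element stride that
// the instruction interpreter expects at the start of a GC program.)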
// scanblock scans a block of n bytes starting at pointer b for references
// to other objects, scanning any it finds recursively until there are no
// unscanned objects left. Instead of using an explicit recursion, it keeps
// a work list in the Workbuf* structures and loops in the main function
// body. Keeping an explicit work list is easier on the stack allocator and
// wbuf: current work buffer
// wp:   storage for next queued pointer (write pointer)
// nobj: number of queued objects
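//
// A useful invariant to keep in mind while reading the loop: wp always points
// just past the last queued Obj, i.e. wp == wbuf->obj + nobj whenever wbuf is
// non-nil; the wp = wbuf->obj + ... assignments after getfull() and handoff()
// re-establish exactly that after a buffer swap.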
scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
	byte *b, *arena_start, *arena_used;
	// TODO(atom): to be expanded in a later CL
	struct Frame {uintptr count, b; uintptr *loop_or_ret;};
	struct Frame stack_top;
	struct BufferList *scanbuffers;
	struct PtrTarget *ptrbuf, *ptrbuf_end;
	struct BitTarget *bitbuf;
	struct PtrTarget *ptrbufpos;
	// End of local variable declarations.
	if(sizeof(Workbuf) % PageSize != 0)
		runtime_throw("scanblock: size of Workbuf is suboptimal");
	// Memory arena parameters.
	arena_start = runtime_mheap.arena_start;
	arena_used = runtime_mheap.arena_used;
	// Allocate ptrbuf, bitbuf
	if(bufferList == nil) {
		bufferList = runtime_SysAlloc(sizeof(*bufferList));
		bufferList->next = nil;
	scanbuffers = bufferList;
	bufferList = bufferList->next;
	ptrbuf = &scanbuffers->ptrtarget[0];
	ptrbuf_end = &scanbuffers->ptrtarget[0] + nelem(scanbuffers->ptrtarget);
	bitbuf = &scanbuffers->bittarget[0];
	runtime_unlock(&lock);
		// Each iteration scans the block b of length n, queueing pointers in
			runtime_printf("scanblock %p %D\n", b, (int64)n);
		// TODO(atom): to be replaced in a later CL
		stack_top.b = (uintptr)b;
		end_b = (uintptr)b + n - PtrSize;
		// TODO(atom): to be expanded in a later CL
			stack_top.b += PtrSize;
			if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
				*ptrbufpos = (struct PtrTarget){obj, 0};
				if(ptrbufpos == ptrbuf_end)
			runtime_throw("scanblock: invalid GC instruction");
		flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
		// Done scanning [b, b+n). Prepare for the next iteration of
		// the loop by setting b, n to the parameters for the next block.
		flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
			// Emptied our buffer: refill.
			wbuf = getfull(wbuf);
			wp = wbuf->obj + wbuf->nobj;
		// Fetch b from the work buffer.
	scanbuffers->next = bufferList;
	bufferList = scanbuffers;
	runtime_unlock(&lock);
// debug_scanblock is the debug copy of scanblock.
// It is simpler, slower, single-threaded, recursive,
// and uses bitSpecial as the mark bit.
debug_scanblock(byte *b, uintptr n)
	uintptr size, *bitp, bits, shift, i, xbits, off;
		runtime_throw("debug_scanblock without DebugMark");
		runtime_printf("debug_scanblock %p %D\n", b, (int64)n);
		runtime_throw("debug_scanblock");
	// Align b to a word boundary.
	off = (uintptr)b & (PtrSize-1);
	for(i=0; i<(uintptr)n; i++) {
		// Words outside the arena cannot be pointers.
		if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
		// Round down to word boundary.
		obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
		// Consult span table to find beginning.
		s = runtime_MHeap_LookupMaybe(&runtime_mheap, obj);
		p = (byte*)((uintptr)s->start<<PageShift);
		if(s->sizeclass == 0) {
			if((byte*)obj >= (byte*)s->limit)
			int32 i = ((byte*)obj - p)/size;
		// Now that we know the object header, reload bits.
		off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
		bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = xbits >> shift;
		// Now we have bits, bitp, and shift correct for
		// obj pointing at the base of the object.
		// If not allocated or already marked, done.
		if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0) // NOTE: bitSpecial not bitMarked
		*bitp |= bitSpecial<<shift;
		if(!(bits & bitMarked))
			runtime_printf("found unmarked block %p in %p\n", obj, vp+i);
		// If object has no pointers, don't need to scan further.
		if((bits & bitNoPointers) != 0)
		debug_scanblock(obj, size);
// Append obj to the work buffer.
// _wbuf, _wp and _nobj are input/output parameters; they specify the work buffer.
enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
		runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);
	// Align obj.b to a word boundary.
	off = (uintptr)obj.p & (PtrSize-1);
		obj.p += PtrSize - off;
		obj.n -= PtrSize - off;
	if(obj.p == nil || obj.n == 0)
	// Load work buffer state
	// If another proc wants a pointer, give it some.
	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
		wbuf = handoff(wbuf);
		wp = wbuf->obj + nobj;
	// If buffer is full, get a new one.
	if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
		wbuf = getempty(wbuf);
	// Save work buffer state
markroot(ParFor *desc, uint32 i)
	enqueue(work.roots[i], &wbuf, &wp, &nobj);
	scanblock(wbuf, wp, nobj, false);
// Get an empty work buffer off the work.empty list,
// allocating new buffers as needed.
		runtime_lfstackpush(&work.full, &b->node);
	b = (Workbuf*)runtime_lfstackpop(&work.empty);
		if(work.nchunk < sizeof *b) {
			work.chunk = runtime_SysAlloc(work.nchunk);
		b = (Workbuf*)work.chunk;
		work.chunk += sizeof *b;
		work.nchunk -= sizeof *b;
		runtime_unlock(&work);
	runtime_lfstackpush(&work.empty, &b->node);
// Get a full work buffer off the work.full list, or return nil.
		runtime_lfstackpush(&work.empty, &b->node);
	b = (Workbuf*)runtime_lfstackpop(&work.full);
	if(b != nil || work.nproc == 1)
	runtime_xadd(&work.nwait, +1);
			runtime_xadd(&work.nwait, -1);
			b = (Workbuf*)runtime_lfstackpop(&work.full);
			runtime_xadd(&work.nwait, +1);
		if(work.nwait == work.nproc)
			m->gcstats.nprocyield++;
			runtime_procyield(20);
			m->gcstats.nosyield++;
	// Make new buffer with half of b's pointers.
	runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
	m->gcstats.nhandoff++;
	m->gcstats.nhandoffcnt += n;
	// Put b on full list - let first half of b get stolen.
	runtime_lfstackpush(&work.full, &b->node);
	if(work.nroot >= work.rootcap) {
		cap = PageSize/sizeof(Obj);
		if(cap < 2*work.rootcap)
			cap = 2*work.rootcap;
		new = (Obj*)runtime_SysAlloc(cap*sizeof(Obj));
		if(work.roots != nil) {
			runtime_memmove(new, work.roots, work.rootcap*sizeof(Obj));
			runtime_SysFree(work.roots, work.rootcap*sizeof(Obj));
	work.roots[work.nroot] = obj;
#ifdef USING_SPLIT_STACK
	if(gp == runtime_g()) {
		// Scanning our own stack.
		sp = __splitstack_find(nil, nil, &spsize, &next_segment,
			&next_sp, &initial_sp);
	} else if((mp = gp->m) != nil && mp->helpgc) {
		// gchelper's stack is in active use and has no interesting pointers.
		// Scanning another goroutine's stack.
		// The goroutine is usually asleep (the world is stopped).
		// The exception is that if the goroutine is about to enter or might
		// have just exited a system call, it may be executing code such
		// as schedlock and may have needed to start a new stack segment.
		// Use the stack segment and stack pointer at the time of
		// the system call instead, since that won't change underfoot.
		if(gp->gcstack != nil) {
			spsize = gp->gcstack_size;
			next_segment = gp->gcnext_segment;
			next_sp = gp->gcnext_sp;
			initial_sp = gp->gcinitial_sp;
			sp = __splitstack_find_context(&gp->stack_context[0],
				&spsize, &next_segment,
				&next_sp, &initial_sp);
		addroot((Obj){sp, spsize, 0});
		while((sp = __splitstack_find(next_segment, next_sp,
				&spsize, &next_segment,
				&next_sp, &initial_sp)) != nil)
			addroot((Obj){sp, spsize, 0});
	if(gp == runtime_g()) {
		// Scanning our own stack.
	} else if((mp = gp->m) != nil && mp->helpgc) {
		// gchelper's stack is in active use and has no interesting pointers.
		// Scanning another goroutine's stack.
		// The goroutine is usually asleep (the world is stopped).
		bottom = (byte*)gp->gcnext_sp;
	top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
		addroot((Obj){bottom, top - bottom, 0});
		addroot((Obj){top, bottom - top, 0});
	if(!runtime_mlookup(v, (byte**)&v, &size, nil) || !runtime_blockspecial(v))
		runtime_throw("mark - finalizer inconsistency");
	// do not mark the finalizer block itself. just mark the things it points at.
	addroot((Obj){v, size, 0});
static struct root_list* roots;
__go_register_gc_roots (struct root_list* r)
	// FIXME: This needs locking if multiple goroutines can call
	// dlopen simultaneously.
	struct root_list *pl;
	MSpan *s, **allspans;
	for(pl = roots; pl != nil; pl = pl->next) {
		struct root* pr = &pl->roots[0];
			void *decl = pr->decl;
			addroot((Obj){decl, pr->size, 0});
	addroot((Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
	addroot((Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
	addroot((Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
	addroot((Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
	runtime_MProf_Mark(addroot);
	runtime_time_scan(addroot);
	runtime_trampoline_scan(addroot);
	allspans = runtime_mheap.allspans;
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
		s = allspans[spanidx];
		if(s->state == MSpanInUse) {
			switch(s->types.compression) {
				// TODO(atom): consider using defaultProg instead of 0
				addroot((Obj){(byte*)&s->types.data, sizeof(void*), 0});
	for(gp=runtime_allg; gp!=nil; gp=gp->alllink) {
			runtime_printf("unexpected G.status %d\n", gp->status);
			runtime_throw("mark - bad status");
			if(gp != runtime_g())
				runtime_throw("mark - world not stopped");
	runtime_walkfintab(addfinroots, addroot);
	for(fb=allfin; fb; fb=fb->alllink)
		addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
	addroot((Obj){(byte*)&work, sizeof work, 0});
handlespecial(byte *p, uintptr size)
	const struct __go_func_type *ft;
	if(!runtime_getfinalizer(p, true, &fn, &ft)) {
		runtime_setblockspecial(p, false);
		runtime_MProf_Free(p, size);
	runtime_lock(&finlock);
	if(finq == nil || finq->cnt == finq->cap) {
			finc = runtime_SysAlloc(PageSize);
			finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
			finc->alllink = allfin;
	f = &finq->fin[finq->cnt];
	runtime_unlock(&finlock);
// Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
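//
// Roughly, for each allocated block the loop below does one of three things
// (this is a summary of the code that follows, not extra behavior):
//	marked                  -> clear bitMarked and keep the block
//	unmarked, bitSpecial    -> try handlespecial (queue the finalizer);
//	                           the block is freed only if that fails
//	unmarked, not special   -> free the block and restore bitBlockBoundary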
sweepspan(ParFor *desc, uint32 idx)
	uintptr type_data_inc;
	s = runtime_mheap.allspans[idx];
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	if(s->state == MSpanFree && s->unusedsince == 0)
		s->unusedsince = runtime_nanotime();
	if(s->state != MSpanInUse)
	arena_start = runtime_mheap.arena_start;
	p = (byte*)(s->start << PageShift);
		// Chunk full of small blocks.
		npages = runtime_class_to_allocnpages[cl];
		n = (npages << PageShift) / size;
	type_data = (byte*)s->types.data;
	type_data_inc = sizeof(uintptr);
	compression = s->types.compression;
	switch(compression) {
		type_data += 8*sizeof(uintptr);
	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.
	for(; n > 0; n--, p += size, type_data+=type_data_inc) {
		uintptr off, *bitp, shift, bits;
		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;
		if((bits & bitAllocated) == 0)
		if((bits & bitMarked) != 0) {
				if(!(bits & bitSpecial))
					runtime_printf("found spurious mark on %p\n", p);
				*bitp &= ~(bitSpecial<<shift);
			*bitp &= ~(bitMarked<<shift);
		// Special means it has a finalizer or is being profiled.
		// In DebugMark mode, the bit has been coopted so
		// we have to assume all blocks are special.
		if(DebugMark || (bits & bitSpecial) != 0) {
			if(handlespecial(p, size))
		// Mark freed; restore block boundary bit.
		*bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
			runtime_unmarkspan(p, 1<<PageShift);
			*(uintptr*)p = 1;	// needs zeroing
			runtime_MHeap_Free(&runtime_mheap, s, 1);
			c->local_alloc -= size;
			// Free small object.
			switch(compression) {
				*(uintptr*)type_data = 0;
				*(byte*)type_data = 0;
			if(size > sizeof(uintptr))
				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
			end->next = (MLink*)p;
		c->local_by_size[cl].nfree += nfree;
		c->local_alloc -= size * nfree;
		c->local_nfree += nfree;
		c->local_cachealloc -= nfree * size;
		c->local_objects -= nfree;
		runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
dumpspan(uint32 idx)
	int32 sizeclass, n, npages, i, column;
	bool allocated, special;
	s = runtime_mheap.allspans[idx];
	if(s->state != MSpanInUse)
	arena_start = runtime_mheap.arena_start;
	p = (byte*)(s->start << PageShift);
	sizeclass = s->sizeclass;
	if(sizeclass == 0) {
		npages = runtime_class_to_allocnpages[sizeclass];
		n = (npages << PageShift) / size;
	runtime_printf("%p .. %p:\n", p, p+n*size);
	for(; n>0; n--, p+=size) {
		uintptr off, *bitp, shift, bits;
		off = (uintptr*)p - (uintptr*)arena_start;
		bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		bits = *bitp>>shift;
		allocated = ((bits & bitAllocated) != 0);
		special = ((bits & bitSpecial) != 0);
		for(i=0; (uint32)i<size; i+=sizeof(void*)) {
				runtime_printf("\t");
				runtime_printf(allocated ? "(" : "[");
				runtime_printf(special ? "@" : "");
				runtime_printf("%p: ", p+i);
				runtime_printf(" ");
			runtime_printf("%p", *(void**)(p+i));
			if(i+sizeof(void*) >= size) {
				runtime_printf(allocated ? ") " : "] ");
				runtime_printf("\n");
	runtime_printf("\n");
// A debugging function to dump the contents of memory
runtime_memorydump(void)
	for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
runtime_gchelper(void)
	// parallel mark over the gc roots
	runtime_parfordo(work.markfor);
	// help other threads scan secondary blocks
	scanblock(nil, nil, 0, true);
		// wait while the main thread executes mark(debug_scanblock)
		while(runtime_atomicload(&work.debugmarkdone) == 0)
	runtime_parfordo(work.sweepfor);
	if(runtime_xadd(&work.ndone, +1) == work.nproc-1)
		runtime_notewakeup(&work.alldone);
// Initialized from $GOGC. GOGC=off means no gc.
// Next gc is after we've allocated an extra amount of
// memory proportional to the amount already in use.
// If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M. This keeps the gc cost in linear
// proportion to the allocation cost. Adjusting gcpercent
// just changes the linear constant (and also the amount of
// extra memory used).
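//
// Concretely, at the end of a collection the trigger is recomputed as
//	next_gc = heap_alloc + (heap_alloc - stacks_sys)*gcpercent/100
// (see gc() below), so gcpercent=100 roughly doubles the live heap before
// the next collection, matching the 4M -> 8M example above.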
static int32 gcpercent = -2;
	for(mp=runtime_allm; mp; mp=mp->alllink)
		runtime_MCache_ReleaseAll(mp->mcache);
cachestats(GCStats *stats)
	uint64 stacks_inuse;
		runtime_memclr((byte*)stats, sizeof(*stats));
	stacks_sys = runtime_stacks_sys;
	for(mp=runtime_allm; mp; mp=mp->alllink) {
		runtime_purgecachedstats(c);
		// stacks_inuse += mp->stackalloc->inuse;
		// stacks_sys += mp->stackalloc->sys;
			src = (uint64*)&mp->gcstats;
			dst = (uint64*)stats;
			for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
			runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
		for(i=0; i<nelem(c->local_by_size); i++) {
			mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
			c->local_by_size[i].nmalloc = 0;
			mstats.by_size[i].nfree += c->local_by_size[i].nfree;
			c->local_by_size[i].nfree = 0;
	mstats.stacks_inuse = stacks_inuse;
	mstats.stacks_sys = stacks_sys;
// Structure of arguments passed to function gc().
// This allows the arguments to be passed via reflect_call.
static void gc(struct gc_args *args);
runtime_gc(int32 force)
	struct gc_args a, *ap;
	// The atomic operations are not atomic if the uint64s
	// are not aligned on uint64 boundaries. This has been
	// a problem in the past.
	if((((uintptr)&work.empty) & 7) != 0)
		runtime_throw("runtime: gc work buffer is misaligned");
	// Make sure all registers are saved on stack so that
	// scanstack sees them.
	__builtin_unwind_init();
	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks. To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock. The next mallocgc
	// without a lock will do the gc instead.
	if(!mstats.enablegc || m->locks > 0 || runtime_panicking)
	if(gcpercent == -2) {	// first time through
		p = runtime_getenv("GOGC");
		if(p == nil || p[0] == '\0')
		else if(runtime_strcmp((const char*)p, "off") == 0)
			gcpercent = runtime_atoi(p);
		p = runtime_getenv("GOGCTRACE");
			gctrace = runtime_atoi(p);
	// Run gc on a bigger stack to eliminate
	// a potentially large number of calls to runtime_morestack.
	// But not when using gccgo.
	if(gctrace > 1 && !force) {
gc(struct gc_args *args)
	int64 t0, t1, t2, t3;
	uint64 heap0, heap1, obj0, obj1;
	runtime_semacquire(&runtime_worldsema);
	if(!args->force && mstats.heap_alloc < mstats.next_gc) {
		runtime_semrelease(&runtime_worldsema);
	t0 = runtime_nanotime();
	runtime_stoptheworld();
	for(mp=runtime_allm; mp; mp=mp->alllink)
		runtime_settype_flush(mp, false);
	heap0 = mstats.heap_alloc;
	obj0 = mstats.nmalloc - mstats.nfree;
	m->locks++;	// disable gc during mallocs in parforalloc
	if(work.markfor == nil)
		work.markfor = runtime_parforalloc(MaxGcproc);
	if(work.sweepfor == nil)
		work.sweepfor = runtime_parforalloc(MaxGcproc);
	work.debugmarkdone = 0;
	work.nproc = runtime_gcprocs();
	runtime_parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
	runtime_parforsetup(work.sweepfor, work.nproc, runtime_mheap.nspan, nil, true, sweepspan);
	if(work.nproc > 1) {
		runtime_noteclear(&work.alldone);
		runtime_helpgc(work.nproc);
	runtime_parfordo(work.markfor);
	scanblock(nil, nil, 0, true);
		for(i=0; i<work.nroot; i++)
			debug_scanblock(work.roots[i].p, work.roots[i].n);
		runtime_atomicstore(&work.debugmarkdone, 1);
	t1 = runtime_nanotime();
	runtime_parfordo(work.sweepfor);
	t2 = runtime_nanotime();
		runtime_notesleep(&work.alldone);
	stats.nprocyield += work.sweepfor->nprocyield;
	stats.nosyield += work.sweepfor->nosyield;
	stats.nsleep += work.sweepfor->nsleep;
	mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
	m->locks++;	// disable gc during the mallocs in newproc
	// kick off or wake up goroutine to run queued finalizers
		fing = __go_go(runfinq, nil);
		runtime_ready(fing);
	heap1 = mstats.heap_alloc;
	obj1 = mstats.nmalloc - mstats.nfree;
	t3 = runtime_nanotime();
	mstats.last_gc = t3;
	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
	mstats.pause_total_ns += t3 - t0;
		runtime_printf("pause %D\n", t3-t0);
		runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
			" %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
			mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
			heap0>>20, heap1>>20, obj0, obj1,
			mstats.nmalloc, mstats.nfree,
			stats.nhandoff, stats.nhandoffcnt,
			work.sweepfor->nsteal, work.sweepfor->nstealcnt,
			stats.nprocyield, stats.nosyield, stats.nsleep);
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
	// give the queued finalizers, if any, a chance to run
void runtime_ReadMemStats(MStats *)
  __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");
runtime_ReadMemStats(MStats *stats)
	// Have to acquire worldsema to stop the world,
	// because stoptheworld can only be used by
	// one goroutine at a time, and there might be
	// a pending garbage collection already calling it.
	runtime_semacquire(&runtime_worldsema);
	runtime_stoptheworld();
	runtime_semrelease(&runtime_worldsema);
	runtime_starttheworld();
runfinq(void* dummy __attribute__ ((unused)))
	FinBlock *fb, *next;
		// There's no need for a lock in this section
		// because it only conflicts with the garbage
		// collector, and the garbage collector only
		// runs when everyone else is stopped, and
		// runfinq only stops at the gosched() or
		// during the calls in the for loop.
			runtime_park(nil, nil, "finalizer wait");
		runtime_racefingo();
		for(; fb; fb=next) {
			for(i=0; i<(uint32)fb->cnt; i++) {
				params[0] = &f->arg;
				reflect_call(f->ft, (void*)f->fn, 0, 0, params, nil);
		runtime_gc(1);	// trigger another gc to clean up the finalized objects, if possible
// mark the block at v of size n as allocated.
// If noptr is true, mark it as having no pointers.
runtime_markallocated(void *v, uintptr n, bool noptr)
	uintptr *b, obits, bits, off, shift;
		runtime_printf("markallocated %p+%p\n", v, n);
	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markallocated: bad pointer");
	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
		bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
			bits |= bitNoPointers<<shift;
		if(runtime_singleproc) {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
// mark the block at v of size n as freed.
runtime_markfreed(void *v, uintptr n)
	uintptr *b, obits, bits, off, shift;
		runtime_printf("markfreed %p+%p\n", v, n);
	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markfreed: bad pointer");
	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
		bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
		if(runtime_singleproc) {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
// check that the block at v of size n is marked freed.
runtime_checkfreed(void *v, uintptr n)
	uintptr *b, bits, off, shift;
	if(!runtime_checking)
	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		return;	// not allocated, so okay
	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start; // word offset
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	if((bits & bitAllocated) != 0) {
		runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
			v, n, off, bits & bitMask);
		runtime_throw("checkfreed: not freed");
// mark the span of memory at v as having n blocks of the given size.
// if leftover is true, there is left over space at the end of the span.
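//
// As a reading of the loop below: each of the n blocks gets bitBlockBoundary
// stamped on its first word, and when leftover is set one extra boundary is
// stamped just past the last block, presumably so that the backward boundary
// search in flushptrbuf stops there instead of attributing the leftover tail
// to the last block.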
runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
	uintptr *b, off, shift;
	if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("markspan: bad pointer");
	if(leftover)	// mark a boundary just past end of last block too
	for(; n-- > 0; p += size) {
		// Okay to use non-atomic ops here, because we control
		// the entire span, and each bitmap word has bits for only
		// one span, so no other goroutines are changing these
		off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start; // word offset
		b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
		shift = off % wordsPerBitmapWord;
		*b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
// unmark the span of memory at v of length n bytes.
runtime_unmarkspan(void *v, uintptr n)
	uintptr *p, *b, off;
	if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
		runtime_throw("unmarkspan: bad pointer");
	off = p - (uintptr*)runtime_mheap.arena_start; // word offset
	if(off % wordsPerBitmapWord != 0)
		runtime_throw("unmarkspan: unaligned pointer");
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	if(n%wordsPerBitmapWord != 0)
		runtime_throw("unmarkspan: unaligned length");
	// Okay to use non-atomic ops here, because we control
	// the entire span, and each bitmap word has bits for only
	// one span, so no other goroutines are changing these
	n /= wordsPerBitmapWord;
runtime_blockspecial(void *v)
	uintptr *b, off, shift;
	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
	return (*b & (bitSpecial<<shift)) != 0;
runtime_setblockspecial(void *v, bool s)
	uintptr *b, off, shift, bits, obits;
	off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
	b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
	shift = off % wordsPerBitmapWord;
			bits = obits | (bitSpecial<<shift);
			bits = obits & ~(bitSpecial<<shift);
		if(runtime_singleproc) {
			// more than one goroutine is potentially running: use atomic op
			if(runtime_casp((void**)b, (void*)obits, (void*)bits))
runtime_MHeap_MapBits(MHeap *h)
	// Caller has added extra mappings to the arena.
	// Add extra mappings of bitmap words as needed.
	// We allocate extra bitmap pieces in chunks of bitmapChunk.
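	//
	// Unit check on the arithmetic below: each heap word needs 4 bitmap bits,
	// i.e. 1/wordsPerBitmapWord of the arena size in bytes, so n comes out as
	// the number of bitmap bytes required; it is then rounded up to bitmapChunk
	// and to the page size before the new portion
	// [arena_start - n, arena_start - bitmap_mapped) is mapped.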
	n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
	n = (n+bitmapChunk-1) & ~(bitmapChunk-1);
	if(h->bitmap_mapped >= n)
	page_size = getpagesize();
	n = (n+page_size-1) & ~(page_size-1);
	runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped);
	h->bitmap_mapped = n;