libgo/runtime/mgc0.c
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // Garbage collector.
6
7 #include <unistd.h>
8
9 #include "runtime.h"
10 #include "arch.h"
11 #include "malloc.h"
12 #include "mgc0.h"
13 #include "race.h"
14
15 #ifdef USING_SPLIT_STACK
16
17 extern void * __splitstack_find (void *, void *, size_t *, void **, void **,
18                                  void **);
19
20 extern void * __splitstack_find_context (void *context[10], size_t *, void **,
21                                          void **, void **);
22
23 #endif
24
25 enum {
26         Debug = 0,
27         DebugMark = 0,  // run second pass to check mark
28
29         // Four bits per word (see #defines below).
30         wordsPerBitmapWord = sizeof(void*)*8/4,
31         bitShift = sizeof(void*)*8/4,
32
33         handoffThreshold = 4,
34         IntermediateBufferCapacity = 64,
35 };
36
37 // Bits in per-word bitmap.
38 // #defines because enum might not be able to hold the values.
39 //
40 // Each word in the bitmap describes wordsPerBitmapWord words
41 // of heap memory.  There are 4 bitmap bits dedicated to each heap word,
42 // so on a 64-bit system there is one bitmap word per 16 heap words.
43 // The bits in the word are packed together by type first, then by
44 // heap location, so each 64-bit bitmap word consists of, from top to bottom,
45 // the 16 bitSpecial bits for the corresponding heap words, then the 16 bitMarked bits,
46 // then the 16 bitNoPointers/bitBlockBoundary bits, then the 16 bitAllocated bits.
47 // This layout makes it easier to iterate over the bits of a given type.
48 //
49 // The bitmap starts at mheap.arena_start and extends *backward* from
50 // there.  On a 64-bit system the off'th word in the arena is tracked by
51 // the off/16+1'th word before mheap.arena_start.  (On a 32-bit system,
52 // the only difference is that the divisor is 8.)
53 //
54 // To pull out the bits corresponding to a given pointer p, we use:
55 //
56 //      off = p - (uintptr*)mheap.arena_start;  // word offset
57 //      b = (uintptr*)mheap.arena_start - off/wordsPerBitmapWord - 1;
58 //      shift = off % wordsPerBitmapWord
59 //      bits = *b >> shift;
60 //      /* then test bits & bitAllocated, bits & bitMarked, etc. */
61 //
62 #define bitAllocated            ((uintptr)1<<(bitShift*0))
63 #define bitNoPointers           ((uintptr)1<<(bitShift*1))      /* when bitAllocated is set */
64 #define bitMarked               ((uintptr)1<<(bitShift*2))      /* when bitAllocated is set */
65 #define bitSpecial              ((uintptr)1<<(bitShift*3))      /* when bitAllocated is set - has finalizer or being profiled */
66 #define bitBlockBoundary        ((uintptr)1<<(bitShift*1))      /* when bitAllocated is NOT set */
67
68 #define bitMask (bitBlockBoundary | bitAllocated | bitMarked | bitSpecial)
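// Illustrative sketch (not part of the runtime): following the formula above,
// the single-proc path of runtime_markallocated (defined near the end of this
// file) marks a pointer-free word v roughly like this:
//
//      off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
//      b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
//      shift = off % wordsPerBitmapWord;
//      *b = (*b & ~(bitMask<<shift)) | (bitAllocated<<shift) | (bitNoPointers<<shift);
//
// When more than one goroutine may be running, the real code replaces the
// final store with an atomic compare-and-swap loop.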
69
70 // Holding worldsema grants an M the right to try to stop the world.
71 // The procedure is:
72 //
73 //      runtime_semacquire(&runtime_worldsema);
74 //      m->gcing = 1;
75 //      runtime_stoptheworld();
76 //
77 //      ... do stuff ...
78 //
79 //      m->gcing = 0;
80 //      runtime_semrelease(&runtime_worldsema);
81 //      runtime_starttheworld();
82 //
83 uint32 runtime_worldsema = 1;
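// Both gc() and runtime_ReadMemStats below follow this protocol: acquire
// runtime_worldsema, set m->gcing, stop the world, do the work, clear
// m->gcing, release the semaphore, and restart the world.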
84
85 static int32 gctrace;
86
87 // The size of Workbuf is N*PageSize.
88 typedef struct Workbuf Workbuf;
89 struct Workbuf
90 {
91 #define SIZE (2*PageSize-sizeof(LFNode)-sizeof(uintptr))
92         LFNode  node; // must be first
93         uintptr nobj;
94         Obj     obj[SIZE/sizeof(Obj) - 1];
95         uint8   _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
96 #undef SIZE
97 };
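// Sizing note: SIZE is 2*PageSize minus the node and nobj fields, the obj
// array holds SIZE/sizeof(Obj) - 1 entries, and _padding absorbs the
// remainder, so sizeof(Workbuf) comes out to 2*PageSize (modulo any struct
// padding the compiler adds); scanblock throws if sizeof(Workbuf) is not a
// multiple of PageSize.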
98
99 typedef struct Finalizer Finalizer;
100 struct Finalizer
101 {
102         void (*fn)(void*);
103         void *arg;
104         const struct __go_func_type *ft;
105 };
106
107 typedef struct FinBlock FinBlock;
108 struct FinBlock
109 {
110         FinBlock *alllink;
111         FinBlock *next;
112         int32 cnt;
113         int32 cap;
114         Finalizer fin[1];
115 };
116
117 static G *fing;
118 static FinBlock *finq; // list of finalizers that are to be executed
119 static FinBlock *finc; // cache of free blocks
120 static FinBlock *allfin; // list of all blocks
121 static Lock finlock;
122 static int32 fingwait;
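// Finalizer flow: sweepspan calls handlespecial for unmarked blocks whose
// bitSpecial bit is set; handlespecial appends a Finalizer to finq, taking
// blocks from the finc cache as needed.  After a collection, gc() starts or
// wakes the fing goroutine, which runs runfinq to execute the queued
// finalizers and return the spent blocks to finc.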
123
124 static void runfinq(void*);
125 static Workbuf* getempty(Workbuf*);
126 static Workbuf* getfull(Workbuf*);
127 static void     putempty(Workbuf*);
128 static Workbuf* handoff(Workbuf*);
129
130 static struct {
131         uint64  full;  // lock-free list of full blocks
132         uint64  empty; // lock-free list of empty blocks
133         byte    pad0[CacheLineSize]; // prevents false-sharing between full/empty and nproc/nwait
134         uint32  nproc;
135         volatile uint32 nwait;
136         volatile uint32 ndone;
137         volatile uint32 debugmarkdone;
138         Note    alldone;
139         ParFor  *markfor;
140         ParFor  *sweepfor;
141
142         Lock;
143         byte    *chunk;
144         uintptr nchunk;
145
146         Obj     *roots;
147         uint32  nroot;
148         uint32  rootcap;
149 } work;
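// The full and empty lists are manipulated lock-free with runtime_lfstackpush
// and runtime_lfstackpop (see getempty, putempty, getfull and handoff below);
// the embedded Lock guards the chunk/nchunk allocator used by getempty.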
150
151 enum {
152         // TODO(atom): to be expanded in a next CL
153         GC_DEFAULT_PTR = GC_NUM_INSTR,
154 };
155
156 // PtrTarget and BitTarget are the element types of the intermediate buffers.
157 // The intermediate buffers hold GC data before it
158 // is moved/flushed to the work buffer (Workbuf).
159 // An intermediate buffer is very small: IntermediateBufferCapacity
160 // (currently 64) elements.
161 struct PtrTarget
162 {
163         void *p;
164         uintptr ti;
165 };
166
167 struct BitTarget
168 {
169         void *p;
170         uintptr ti;
171         uintptr *bitp, shift;
172 };
173
174 struct BufferList
175 {
176         struct PtrTarget ptrtarget[IntermediateBufferCapacity];
177         struct BitTarget bittarget[IntermediateBufferCapacity];
178         struct BufferList *next;
179 };
180 static struct BufferList *bufferList;
181
182 static Lock lock;
183
184 // flushptrbuf moves data from the PtrTarget buffer to the work buffer.
185 // The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
186 // while the work buffer contains blocks which have been marked
187 // and are prepared to be scanned by the garbage collector.
188 //
189 // _wp, _wbuf, _nobj are input/output parameters; they specify the work buffer.
190 // bitbuf holds temporary data generated by this function.
191 //
192 // A simplified drawing explaining how the todo-list moves from one structure to another:
193 //
194 //     scanblock
195 //  (find pointers)
196 //    Obj ------> PtrTarget (pointer targets)
197 //     ↑          |
198 //     |          | flushptrbuf (1st part,
199 //     |          | find block start)
200 //     |          ↓
201 //     `--------- BitTarget (pointer targets and the corresponding locations in bitmap)
202 //  flushptrbuf
203 //  (2nd part, mark and enqueue)
204 static void
205 flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, struct BitTarget *bitbuf)
206 {
207         byte *p, *arena_start, *obj;
208         uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti;
209         MSpan *s;
210         PageID k;
211         Obj *wp;
212         Workbuf *wbuf;
213         struct PtrTarget *ptrbuf_end;
214         struct BitTarget *bitbufpos, *bt;
215
216         arena_start = runtime_mheap.arena_start;
217
218         wp = *_wp;
219         wbuf = *_wbuf;
220         nobj = *_nobj;
221
222         ptrbuf_end = ptrbuf + n;
223
224         // If buffer is nearly full, get a new one.
225         if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
226                 if(wbuf != nil)
227                         wbuf->nobj = nobj;
228                 wbuf = getempty(wbuf);
229                 wp = wbuf->obj;
230                 nobj = 0;
231
232                 if(n >= nelem(wbuf->obj))
233                         runtime_throw("ptrbuf has to be smaller than WorkBuf");
234         }
235
236         // TODO(atom): This block is a branch of an if-then-else statement.
237         //             The single-threaded branch may be added in a next CL.
238         {
239                 // Multi-threaded version.
240
241                 bitbufpos = bitbuf;
242
243                 while(ptrbuf < ptrbuf_end) {
244                         obj = ptrbuf->p;
245                         ti = ptrbuf->ti;
246                         ptrbuf++;
247
248                         // obj belongs to interval [mheap.arena_start, mheap.arena_used).
249                         if(Debug > 1) {
250                                 if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
251                                         runtime_throw("object is outside of mheap");
252                         }
253
254                         // obj may be a pointer to a live object.
255                         // Try to find the beginning of the object.
256
257                         // Round down to word boundary.
258                         if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
259                                 obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
260                                 ti = 0;
261                         }
262
263                         // Find bits for this word.
264                         off = (uintptr*)obj - (uintptr*)arena_start;
265                         bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
266                         shift = off % wordsPerBitmapWord;
267                         xbits = *bitp;
268                         bits = xbits >> shift;
269
270                         // Pointing at the beginning of a block?
271                         if((bits & (bitAllocated|bitBlockBoundary)) != 0)
272                                 goto found;
273
274                         ti = 0;
275
276                         // Pointing just past the beginning?
277                         // Scan backward a little to find a block boundary.
278                         for(j=shift; j-->0; ) {
279                                 if(((xbits>>j) & (bitAllocated|bitBlockBoundary)) != 0) {
280                                         obj = (byte*)obj - (shift-j)*PtrSize;
281                                         shift = j;
282                                         bits = xbits>>shift;
283                                         goto found;
284                                 }
285                         }
286
287                         // Otherwise consult span table to find beginning.
288                         // (Manually inlined copy of MHeap_LookupMaybe.)
289                         k = (uintptr)obj>>PageShift;
290                         x = k;
291                         if(sizeof(void*) == 8)
292                                 x -= (uintptr)arena_start>>PageShift;
293                         s = runtime_mheap.map[x];
294                         if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
295                                 continue;
296                         p = (byte*)((uintptr)s->start<<PageShift);
297                         if(s->sizeclass == 0) {
298                                 obj = p;
299                         } else {
300                                 if((byte*)obj >= (byte*)s->limit)
301                                         continue;
302                                 size = s->elemsize;
303                                 int32 i = ((byte*)obj - p)/size;
304                                 obj = p+i*size;
305                         }
306
307                         // Now that we know the object header, reload bits.
308                         off = (uintptr*)obj - (uintptr*)arena_start;
309                         bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
310                         shift = off % wordsPerBitmapWord;
311                         xbits = *bitp;
312                         bits = xbits >> shift;
313
314                 found:
315                         // Now we have bits, bitp, and shift correct for
316                         // obj pointing at the base of the object.
317                         // Only care about allocated and not marked.
318                         if((bits & (bitAllocated|bitMarked)) != bitAllocated)
319                                 continue;
320
321                         *bitbufpos = (struct BitTarget){obj, ti, bitp, shift};
322                         bitbufpos++;
323                 }
324
325                 runtime_lock(&lock);
326                 for(bt=bitbuf; bt<bitbufpos; bt++){
327                         xbits = *bt->bitp;
328                         bits = xbits >> bt->shift;
329                         if((bits & bitMarked) != 0)
330                                 continue;
331
332                         // Mark the block
333                         *bt->bitp = xbits | (bitMarked << bt->shift);
334
335                         // If object has no pointers, don't need to scan further.
336                         if((bits & bitNoPointers) != 0)
337                                 continue;
338
339                         obj = bt->p;
340
341                         // Ask span about size class.
342                         // (Manually inlined copy of MHeap_Lookup.)
343                         x = (uintptr)obj >> PageShift;
344                         if(sizeof(void*) == 8)
345                                 x -= (uintptr)arena_start>>PageShift;
346                         s = runtime_mheap.map[x];
347
348                         PREFETCH(obj);
349
350                         *wp = (Obj){obj, s->elemsize, bt->ti};
351                         wp++;
352                         nobj++;
353                 }
354                 runtime_unlock(&lock);
355
356                 // If another proc wants a pointer, give it some.
357                 if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
358                         wbuf->nobj = nobj;
359                         wbuf = handoff(wbuf);
360                         nobj = wbuf->nobj;
361                         wp = wbuf->obj + nobj;
362                 }
363         }
364
365         *_wp = wp;
366         *_wbuf = wbuf;
367         *_nobj = nobj;
368 }
369
370 // Program that scans the whole block and treats every block element as a potential pointer
371 static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
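// scanblock below loads this program, skips its first word (pc++), and then
// dispatches on pc[0]; the GC_DEFAULT_PTR instruction walks the block word by
// word and records anything that points into [arena_start, arena_used) in the
// PtrTarget buffer for flushptrbuf to resolve.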
372
373 // scanblock scans a block of n bytes starting at pointer b for references
374 // to other objects, scanning any it finds recursively until there are no
375 // unscanned objects left.  Instead of using an explicit recursion, it keeps
376 // a work list in the Workbuf* structures and loops in the main function
377 // body.  Keeping an explicit work list is easier on the stack allocator and
378 // more efficient.
379 //
380 // wbuf: current work buffer
381 // wp:   storage for next queued pointer (write pointer)
382 // nobj: number of queued objects
383 static void
384 scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
385 {
386         byte *b, *arena_start, *arena_used;
387         uintptr n, i, end_b;
388         void *obj;
389
390         // TODO(atom): to be expanded in a next CL
391         struct Frame {uintptr count, b; uintptr *loop_or_ret;};
392         struct Frame stack_top;
393
394         uintptr *pc;
395
396         struct BufferList *scanbuffers;
397         struct PtrTarget *ptrbuf, *ptrbuf_end;
398         struct BitTarget *bitbuf;
399
400         struct PtrTarget *ptrbufpos;
401
402         // End of local variable declarations.
403
404         if(sizeof(Workbuf) % PageSize != 0)
405                 runtime_throw("scanblock: size of Workbuf is suboptimal");
406
407         // Memory arena parameters.
408         arena_start = runtime_mheap.arena_start;
409         arena_used = runtime_mheap.arena_used;
410
411         // Allocate ptrbuf, bitbuf
412         {
413                 runtime_lock(&lock);
414
415                 if(bufferList == nil) {
416                         bufferList = runtime_SysAlloc(sizeof(*bufferList));
417                         bufferList->next = nil;
418                 }
419                 scanbuffers = bufferList;
420                 bufferList = bufferList->next;
421
422                 ptrbuf = &scanbuffers->ptrtarget[0];
423                 ptrbuf_end = &scanbuffers->ptrtarget[0] + nelem(scanbuffers->ptrtarget);
424                 bitbuf = &scanbuffers->bittarget[0];
425
426                 runtime_unlock(&lock);
427         }
428
429         ptrbufpos = ptrbuf;
430
431         goto next_block;
432
433         for(;;) {
434                 // Each iteration scans the block b of length n, queueing pointers in
435                 // the work buffer.
436                 if(Debug > 1) {
437                         runtime_printf("scanblock %p %D\n", b, (int64)n);
438                 }
439
440                 // TODO(atom): to be replaced in a next CL
441                 pc = defaultProg;
442
443                 pc++;
444                 stack_top.b = (uintptr)b;
445
446                 end_b = (uintptr)b + n - PtrSize;
447
448         next_instr:
449                 // TODO(atom): to be expanded in a next CL
450                 switch(pc[0]) {
451                 case GC_DEFAULT_PTR:
452                         while(true) {
453                                 i = stack_top.b;
454                                 if(i > end_b)
455                                         goto next_block;
456                                 stack_top.b += PtrSize;
457
458                                 obj = *(byte**)i;
459                                 if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
460                                         *ptrbufpos = (struct PtrTarget){obj, 0};
461                                         ptrbufpos++;
462                                         if(ptrbufpos == ptrbuf_end)
463                                                 goto flush_buffers;
464                                 }
465                         }
466
467                 default:
468                         runtime_throw("scanblock: invalid GC instruction");
469                         return;
470                 }
471
472         flush_buffers:
473                 flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
474                 ptrbufpos = ptrbuf;
475                 goto next_instr;
476
477         next_block:
478                 // Done scanning [b, b+n).  Prepare for the next iteration of
479                 // the loop by setting b, n to the parameters for the next block.
480
481                 if(nobj == 0) {
482                         flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
483                         ptrbufpos = ptrbuf;
484
485                         if(nobj == 0) {
486                                 if(!keepworking) {
487                                         if(wbuf)
488                                                 putempty(wbuf);
489                                         goto endscan;
490                                 }
491                                 // Emptied our buffer: refill.
492                                 wbuf = getfull(wbuf);
493                                 if(wbuf == nil)
494                                         goto endscan;
495                                 nobj = wbuf->nobj;
496                                 wp = wbuf->obj + wbuf->nobj;
497                         }
498                 }
499
500                 // Fetch b from the work buffer.
501                 --wp;
502                 b = wp->p;
503                 n = wp->n;
504                 nobj--;
505         }
506
507 endscan:
508         runtime_lock(&lock);
509         scanbuffers->next = bufferList;
510         bufferList = scanbuffers;
511         runtime_unlock(&lock);
512 }
513
514 // debug_scanblock is the debug copy of scanblock.
515 // It is simpler, slower, single-threaded, recursive,
516 // and uses bitSpecial as the mark bit.
517 static void
518 debug_scanblock(byte *b, uintptr n)
519 {
520         byte *obj, *p;
521         void **vp;
522         uintptr size, *bitp, bits, shift, i, xbits, off;
523         MSpan *s;
524
525         if(!DebugMark)
526                 runtime_throw("debug_scanblock without DebugMark");
527
528         if((intptr)n < 0) {
529                 runtime_printf("debug_scanblock %p %D\n", b, (int64)n);
530                 runtime_throw("debug_scanblock");
531         }
532
533         // Align b to a word boundary.
534         off = (uintptr)b & (PtrSize-1);
535         if(off != 0) {
536                 b += PtrSize - off;
537                 n -= PtrSize - off;
538         }
539
540         vp = (void**)b;
541         n /= PtrSize;
542         for(i=0; i<(uintptr)n; i++) {
543                 obj = (byte*)vp[i];
544
545                 // Words outside the arena cannot be pointers.
546                 if((byte*)obj < runtime_mheap.arena_start || (byte*)obj >= runtime_mheap.arena_used)
547                         continue;
548
549                 // Round down to word boundary.
550                 obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
551
552                 // Consult span table to find beginning.
553                 s = runtime_MHeap_LookupMaybe(&runtime_mheap, obj);
554                 if(s == nil)
555                         continue;
556
557                 p =  (byte*)((uintptr)s->start<<PageShift);
558                 size = s->elemsize;
559                 if(s->sizeclass == 0) {
560                         obj = p;
561                 } else {
562                         if((byte*)obj >= (byte*)s->limit)
563                                 continue;
564                         int32 i = ((byte*)obj - p)/size;
565                         obj = p+i*size;
566                 }
567
568                 // Now that we know the object header, reload bits.
569                 off = (uintptr*)obj - (uintptr*)runtime_mheap.arena_start;
570                 bitp = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
571                 shift = off % wordsPerBitmapWord;
572                 xbits = *bitp;
573                 bits = xbits >> shift;
574
575                 // Now we have bits, bitp, and shift correct for
576                 // obj pointing at the base of the object.
577                 // If not allocated or already marked, done.
578                 if((bits & bitAllocated) == 0 || (bits & bitSpecial) != 0)  // NOTE: bitSpecial not bitMarked
579                         continue;
580                 *bitp |= bitSpecial<<shift;
581                 if(!(bits & bitMarked))
582                         runtime_printf("found unmarked block %p in %p\n", obj, vp+i);
583
584                 // If object has no pointers, don't need to scan further.
585                 if((bits & bitNoPointers) != 0)
586                         continue;
587
588                 debug_scanblock(obj, size);
589         }
590 }
591
592 // Append obj to the work buffer.
593 // _wbuf, _wp, _nobj are input/output parameters; they specify the work buffer.
594 static void
595 enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
596 {
597         uintptr nobj, off;
598         Obj *wp;
599         Workbuf *wbuf;
600
601         if(Debug > 1)
602                 runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);
603
604         // Align obj.p to a word boundary.
605         off = (uintptr)obj.p & (PtrSize-1);
606         if(off != 0) {
607                 obj.p += PtrSize - off;
608                 obj.n -= PtrSize - off;
609                 obj.ti = 0;
610         }
611
612         if(obj.p == nil || obj.n == 0)
613                 return;
614
615         // Load work buffer state
616         wp = *_wp;
617         wbuf = *_wbuf;
618         nobj = *_nobj;
619
620         // If another proc wants a pointer, give it some.
621         if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
622                 wbuf->nobj = nobj;
623                 wbuf = handoff(wbuf);
624                 nobj = wbuf->nobj;
625                 wp = wbuf->obj + nobj;
626         }
627
628         // If buffer is full, get a new one.
629         if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
630                 if(wbuf != nil)
631                         wbuf->nobj = nobj;
632                 wbuf = getempty(wbuf);
633                 wp = wbuf->obj;
634                 nobj = 0;
635         }
636
637         *wp = obj;
638         wp++;
639         nobj++;
640
641         // Save work buffer state
642         *_wp = wp;
643         *_wbuf = wbuf;
644         *_nobj = nobj;
645 }
646
647 static void
648 markroot(ParFor *desc, uint32 i)
649 {
650         Obj *wp;
651         Workbuf *wbuf;
652         uintptr nobj;
653
654         USED(&desc);
655         wp = nil;
656         wbuf = nil;
657         nobj = 0;
658         enqueue(work.roots[i], &wbuf, &wp, &nobj);
659         scanblock(wbuf, wp, nobj, false);
660 }
661
662 // Get an empty work buffer off the work.empty list,
663 // allocating new buffers as needed.
664 static Workbuf*
665 getempty(Workbuf *b)
666 {
667         if(b != nil)
668                 runtime_lfstackpush(&work.full, &b->node);
669         b = (Workbuf*)runtime_lfstackpop(&work.empty);
670         if(b == nil) {
671                 // Need to allocate.
672                 runtime_lock(&work);
673                 if(work.nchunk < sizeof *b) {
674                         work.nchunk = 1<<20;
675                         work.chunk = runtime_SysAlloc(work.nchunk);
676                 }
677                 b = (Workbuf*)work.chunk;
678                 work.chunk += sizeof *b;
679                 work.nchunk -= sizeof *b;
680                 runtime_unlock(&work);
681         }
682         b->nobj = 0;
683         return b;
684 }
685
686 static void
687 putempty(Workbuf *b)
688 {
689         runtime_lfstackpush(&work.empty, &b->node);
690 }
691
692 // Get a full work buffer off the work.full list, or return nil.
693 static Workbuf*
694 getfull(Workbuf *b)
695 {
696         M *m;
697         int32 i;
698
699         if(b != nil)
700                 runtime_lfstackpush(&work.empty, &b->node);
701         b = (Workbuf*)runtime_lfstackpop(&work.full);
702         if(b != nil || work.nproc == 1)
703                 return b;
704
705         m = runtime_m();
706         runtime_xadd(&work.nwait, +1);
707         for(i=0;; i++) {
708                 if(work.full != 0) {
709                         runtime_xadd(&work.nwait, -1);
710                         b = (Workbuf*)runtime_lfstackpop(&work.full);
711                         if(b != nil)
712                                 return b;
713                         runtime_xadd(&work.nwait, +1);
714                 }
715                 if(work.nwait == work.nproc)
716                         return nil;
717                 if(i < 10) {
718                         m->gcstats.nprocyield++;
719                         runtime_procyield(20);
720                 } else if(i < 20) {
721                         m->gcstats.nosyield++;
722                         runtime_osyield();
723                 } else {
724                         m->gcstats.nsleep++;
725                         runtime_usleep(100);
726                 }
727         }
728 }
729
730 static Workbuf*
731 handoff(Workbuf *b)
732 {
733         M *m;
734         int32 n;
735         Workbuf *b1;
736
737         m = runtime_m();
738
739         // Make new buffer with half of b's pointers.
740         b1 = getempty(nil);
741         n = b->nobj/2;
742         b->nobj -= n;
743         b1->nobj = n;
744         runtime_memmove(b1->obj, b->obj+b->nobj, n*sizeof b1->obj[0]);
745         m->gcstats.nhandoff++;
746         m->gcstats.nhandoffcnt += n;
747
748         // Put b on full list - let first half of b get stolen.
749         runtime_lfstackpush(&work.full, &b->node);
750         return b1;
751 }
752
753 static void
754 addroot(Obj obj)
755 {
756         uint32 cap;
757         Obj *new;
758
759         if(work.nroot >= work.rootcap) {
760                 cap = PageSize/sizeof(Obj);
761                 if(cap < 2*work.rootcap)
762                         cap = 2*work.rootcap;
763                 new = (Obj*)runtime_SysAlloc(cap*sizeof(Obj));
764                 if(work.roots != nil) {
765                         runtime_memmove(new, work.roots, work.rootcap*sizeof(Obj));
766                         runtime_SysFree(work.roots, work.rootcap*sizeof(Obj));
767                 }
768                 work.roots = new;
769                 work.rootcap = cap;
770         }
771         work.roots[work.nroot] = obj;
772         work.nroot++;
773 }
774
775 static void
776 addstackroots(G *gp)
777 {
778 #ifdef USING_SPLIT_STACK
779         M *mp;
780         void* sp;
781         size_t spsize;
782         void* next_segment;
783         void* next_sp;
784         void* initial_sp;
785
786         if(gp == runtime_g()) {
787                 // Scanning our own stack.
788                 sp = __splitstack_find(nil, nil, &spsize, &next_segment,
789                                        &next_sp, &initial_sp);
790         } else if((mp = gp->m) != nil && mp->helpgc) {
791                 // gchelper's stack is in active use and has no interesting pointers.
792                 return;
793         } else {
794                 // Scanning another goroutine's stack.
795                 // The goroutine is usually asleep (the world is stopped).
796
797                 // The exception is that if the goroutine is about to enter or might
798                 // have just exited a system call, it may be executing code such
799                 // as schedlock and may have needed to start a new stack segment.
800                 // Use the stack segment and stack pointer at the time of
801                 // the system call instead, since that won't change underfoot.
802                 if(gp->gcstack != nil) {
803                         sp = gp->gcstack;
804                         spsize = gp->gcstack_size;
805                         next_segment = gp->gcnext_segment;
806                         next_sp = gp->gcnext_sp;
807                         initial_sp = gp->gcinitial_sp;
808                 } else {
809                         sp = __splitstack_find_context(&gp->stack_context[0],
810                                                        &spsize, &next_segment,
811                                                        &next_sp, &initial_sp);
812                 }
813         }
814         if(sp != nil) {
815                 addroot((Obj){sp, spsize, 0});
816                 while((sp = __splitstack_find(next_segment, next_sp,
817                                               &spsize, &next_segment,
818                                               &next_sp, &initial_sp)) != nil)
819                         addroot((Obj){sp, spsize, 0});
820         }
821 #else
822         M *mp;
823         byte* bottom;
824         byte* top;
825
826         if(gp == runtime_g()) {
827                 // Scanning our own stack.
828                 bottom = (byte*)&gp;
829         } else if((mp = gp->m) != nil && mp->helpgc) {
830                 // gchelper's stack is in active use and has no interesting pointers.
831                 return;
832         } else {
833                 // Scanning another goroutine's stack.
834                 // The goroutine is usually asleep (the world is stopped).
835                 bottom = (byte*)gp->gcnext_sp;
836                 if(bottom == nil)
837                         return;
838         }
839         top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
840         if(top > bottom)
841                 addroot((Obj){bottom, top - bottom, 0});
842         else
843                 addroot((Obj){top, bottom - top, 0});
844 #endif
845 }
846
847 static void
848 addfinroots(void *v)
849 {
850         uintptr size;
851
852         size = 0;
853         if(!runtime_mlookup(v, (byte**)&v, &size, nil) || !runtime_blockspecial(v))
854                 runtime_throw("mark - finalizer inconsistency");
855
856         // Do not mark the finalizer block itself; just mark the things it points at.
857         addroot((Obj){v, size, 0});
858 }
859
860 static struct root_list* roots;
861
862 void
863 __go_register_gc_roots (struct root_list* r)
864 {
865         // FIXME: This needs locking if multiple goroutines can call
866         // dlopen simultaneously.
867         r->next = roots;
868         roots = r;
869 }
870
871 static void
872 addroots(void)
873 {
874         struct root_list *pl;
875         G *gp;
876         FinBlock *fb;
877         MSpan *s, **allspans;
878         uint32 spanidx;
879
880         work.nroot = 0;
881
882         // mark data+bss.
883         for(pl = roots; pl != nil; pl = pl->next) {
884                 struct root* pr = &pl->roots[0];
885                 while(1) {
886                         void *decl = pr->decl;
887                         if(decl == nil)
888                                 break;
889                         addroot((Obj){decl, pr->size, 0});
890                         pr++;
891                 }
892         }
893
894         addroot((Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
895         addroot((Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
896         addroot((Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
897         addroot((Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
898         runtime_MProf_Mark(addroot);
899         runtime_time_scan(addroot);
900         runtime_trampoline_scan(addroot);
901
902         // MSpan.types
903         allspans = runtime_mheap.allspans;
904         for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
905                 s = allspans[spanidx];
906                 if(s->state == MSpanInUse) {
907                         switch(s->types.compression) {
908                         case MTypes_Empty:
909                         case MTypes_Single:
910                                 break;
911                         case MTypes_Words:
912                         case MTypes_Bytes:
913                                 // TODO(atom): consider using defaultProg instead of 0
914                                 addroot((Obj){(byte*)&s->types.data, sizeof(void*), 0});
915                                 break;
916                         }
917                 }
918         }
919
920         // stacks
921         for(gp=runtime_allg; gp!=nil; gp=gp->alllink) {
922                 switch(gp->status){
923                 default:
924                         runtime_printf("unexpected G.status %d\n", gp->status);
925                         runtime_throw("mark - bad status");
926                 case Gdead:
927                         break;
928                 case Grunning:
929                         if(gp != runtime_g())
930                                 runtime_throw("mark - world not stopped");
931                         addstackroots(gp);
932                         break;
933                 case Grunnable:
934                 case Gsyscall:
935                 case Gwaiting:
936                         addstackroots(gp);
937                         break;
938                 }
939         }
940
941         runtime_walkfintab(addfinroots, addroot);
942
943         for(fb=allfin; fb; fb=fb->alllink)
944                 addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
945
946         addroot((Obj){(byte*)&work, sizeof work, 0});
947 }
948
949 static bool
950 handlespecial(byte *p, uintptr size)
951 {
952         void (*fn)(void*);
953         const struct __go_func_type *ft;
954         FinBlock *block;
955         Finalizer *f;
956         
957         if(!runtime_getfinalizer(p, true, &fn, &ft)) {
958                 runtime_setblockspecial(p, false);
959                 runtime_MProf_Free(p, size);
960                 return false;
961         }
962
963         runtime_lock(&finlock);
964         if(finq == nil || finq->cnt == finq->cap) {
965                 if(finc == nil) {
966                         finc = runtime_SysAlloc(PageSize);
967                         finc->cap = (PageSize - sizeof(FinBlock)) / sizeof(Finalizer) + 1;
968                         finc->alllink = allfin;
969                         allfin = finc;
970                 }
971                 block = finc;
972                 finc = block->next;
973                 block->next = finq;
974                 finq = block;
975         }
976         f = &finq->fin[finq->cnt];
977         finq->cnt++;
978         f->fn = fn;
979         f->ft = ft;
980         f->arg = p;
981         runtime_unlock(&finlock);
982         return true;
983 }
984
985 // Sweep frees or collects finalizers for blocks not marked in the mark phase.
986 // It clears the mark bits in preparation for the next GC round.
987 static void
988 sweepspan(ParFor *desc, uint32 idx)
989 {
990         M *m;
991         int32 cl, n, npages;
992         uintptr size;
993         byte *p;
994         MCache *c;
995         byte *arena_start;
996         MLink head, *end;
997         int32 nfree;
998         byte *type_data;
999         byte compression;
1000         uintptr type_data_inc;
1001         MSpan *s;
1002
1003         m = runtime_m();
1004
1005         USED(&desc);
1006         s = runtime_mheap.allspans[idx];
1007         // Stamp newly unused spans. The scavenger will use that
1008         // info to potentially give back some pages to the OS.
1009         if(s->state == MSpanFree && s->unusedsince == 0)
1010                 s->unusedsince = runtime_nanotime();
1011         if(s->state != MSpanInUse)
1012                 return;
1013         arena_start = runtime_mheap.arena_start;
1014         p = (byte*)(s->start << PageShift);
1015         cl = s->sizeclass;
1016         size = s->elemsize;
1017         if(cl == 0) {
1018                 n = 1;
1019         } else {
1020                 // Chunk full of small blocks.
1021                 npages = runtime_class_to_allocnpages[cl];
1022                 n = (npages << PageShift) / size;
1023         }
1024         nfree = 0;
1025         end = &head;
1026         c = m->mcache;
1027         
1028         type_data = (byte*)s->types.data;
1029         type_data_inc = sizeof(uintptr);
1030         compression = s->types.compression;
1031         switch(compression) {
1032         case MTypes_Bytes:
1033                 type_data += 8*sizeof(uintptr);
1034                 type_data_inc = 1;
1035                 break;
1036         }
1037
1038         // Sweep through n objects of given size starting at p.
1039         // This thread owns the span now, so it can manipulate
1040         // the block bitmap without atomic operations.
1041         for(; n > 0; n--, p += size, type_data+=type_data_inc) {
1042                 uintptr off, *bitp, shift, bits;
1043
1044                 off = (uintptr*)p - (uintptr*)arena_start;
1045                 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1046                 shift = off % wordsPerBitmapWord;
1047                 bits = *bitp>>shift;
1048
1049                 if((bits & bitAllocated) == 0)
1050                         continue;
1051
1052                 if((bits & bitMarked) != 0) {
1053                         if(DebugMark) {
1054                                 if(!(bits & bitSpecial))
1055                                         runtime_printf("found spurious mark on %p\n", p);
1056                                 *bitp &= ~(bitSpecial<<shift);
1057                         }
1058                         *bitp &= ~(bitMarked<<shift);
1059                         continue;
1060                 }
1061
1062                 // Special means it has a finalizer or is being profiled.
1063                 // In DebugMark mode, the bit has been coopted so
1064                 // we have to assume all blocks are special.
1065                 if(DebugMark || (bits & bitSpecial) != 0) {
1066                         if(handlespecial(p, size))
1067                                 continue;
1068                 }
1069
1070                 // Mark freed; restore block boundary bit.
1071                 *bitp = (*bitp & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
1072
1073                 if(cl == 0) {
1074                         // Free large span.
1075                         runtime_unmarkspan(p, 1<<PageShift);
1076                         *(uintptr*)p = 1;       // needs zeroing
1077                         runtime_MHeap_Free(&runtime_mheap, s, 1);
1078                         c->local_alloc -= size;
1079                         c->local_nfree++;
1080                 } else {
1081                         // Free small object.
1082                         switch(compression) {
1083                         case MTypes_Words:
1084                                 *(uintptr*)type_data = 0;
1085                                 break;
1086                         case MTypes_Bytes:
1087                                 *(byte*)type_data = 0;
1088                                 break;
1089                         }
1090                         if(size > sizeof(uintptr))
1091                                 ((uintptr*)p)[1] = 1;   // mark as "needs to be zeroed"
1092                         
1093                         end->next = (MLink*)p;
1094                         end = (MLink*)p;
1095                         nfree++;
1096                 }
1097         }
1098
1099         if(nfree) {
1100                 c->local_by_size[cl].nfree += nfree;
1101                 c->local_alloc -= size * nfree;
1102                 c->local_nfree += nfree;
1103                 c->local_cachealloc -= nfree * size;
1104                 c->local_objects -= nfree;
1105                 runtime_MCentral_FreeSpan(&runtime_mheap.central[cl], s, nfree, head.next, end);
1106         }
1107 }
1108
1109 static void
1110 dumpspan(uint32 idx)
1111 {
1112         int32 sizeclass, n, npages, i, column;
1113         uintptr size;
1114         byte *p;
1115         byte *arena_start;
1116         MSpan *s;
1117         bool allocated, special;
1118
1119         s = runtime_mheap.allspans[idx];
1120         if(s->state != MSpanInUse)
1121                 return;
1122         arena_start = runtime_mheap.arena_start;
1123         p = (byte*)(s->start << PageShift);
1124         sizeclass = s->sizeclass;
1125         size = s->elemsize;
1126         if(sizeclass == 0) {
1127                 n = 1;
1128         } else {
1129                 npages = runtime_class_to_allocnpages[sizeclass];
1130                 n = (npages << PageShift) / size;
1131         }
1132         
1133         runtime_printf("%p .. %p:\n", p, p+n*size);
1134         column = 0;
1135         for(; n>0; n--, p+=size) {
1136                 uintptr off, *bitp, shift, bits;
1137
1138                 off = (uintptr*)p - (uintptr*)arena_start;
1139                 bitp = (uintptr*)arena_start - off/wordsPerBitmapWord - 1;
1140                 shift = off % wordsPerBitmapWord;
1141                 bits = *bitp>>shift;
1142
1143                 allocated = ((bits & bitAllocated) != 0);
1144                 special = ((bits & bitSpecial) != 0);
1145
1146                 for(i=0; (uint32)i<size; i+=sizeof(void*)) {
1147                         if(column == 0) {
1148                                 runtime_printf("\t");
1149                         }
1150                         if(i == 0) {
1151                                 runtime_printf(allocated ? "(" : "[");
1152                                 runtime_printf(special ? "@" : "");
1153                                 runtime_printf("%p: ", p+i);
1154                         } else {
1155                                 runtime_printf(" ");
1156                         }
1157
1158                         runtime_printf("%p", *(void**)(p+i));
1159
1160                         if(i+sizeof(void*) >= size) {
1161                                 runtime_printf(allocated ? ") " : "] ");
1162                         }
1163
1164                         column++;
1165                         if(column == 8) {
1166                                 runtime_printf("\n");
1167                                 column = 0;
1168                         }
1169                 }
1170         }
1171         runtime_printf("\n");
1172 }
1173
1174 // A debugging function to dump the contents of memory
1175 void
1176 runtime_memorydump(void)
1177 {
1178         uint32 spanidx;
1179
1180         for(spanidx=0; spanidx<runtime_mheap.nspan; spanidx++) {
1181                 dumpspan(spanidx);
1182         }
1183 }
1184
1185 void
1186 runtime_gchelper(void)
1187 {
1188         // parallel mark over the gc roots
1189         runtime_parfordo(work.markfor);
1190
1191         // help other threads scan secondary blocks
1192         scanblock(nil, nil, 0, true);
1193
1194         if(DebugMark) {
1195                 // wait while the main thread executes mark(debug_scanblock)
1196                 while(runtime_atomicload(&work.debugmarkdone) == 0)
1197                         runtime_usleep(10);
1198         }
1199
1200         runtime_parfordo(work.sweepfor);
1201         if(runtime_xadd(&work.ndone, +1) == work.nproc-1)
1202                 runtime_notewakeup(&work.alldone);
1203 }
1204
1205 // Initialized from $GOGC.  GOGC=off means no gc.
1206 //
1207 // Next gc is after we've allocated an extra amount of
1208 // memory proportional to the amount already in use.
1209 // If gcpercent=100 and we're using 4M, we'll gc again
1210 // when we get to 8M.  This keeps the gc cost in linear
1211 // proportion to the allocation cost.  Adjusting gcpercent
1212 // just changes the linear constant (and also the amount of
1213 // extra memory used).
1214 static int32 gcpercent = -2;
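// For example, gc() below computes
//
//      mstats.next_gc = mstats.heap_alloc +
//              (mstats.heap_alloc - runtime_stacks_sys)*gcpercent/100;
//
// so with GOGC=100 and roughly 4M of live heap the next collection triggers
// at roughly 8M, as described above.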
1215
1216 static void
1217 stealcache(void)
1218 {
1219         M *mp;
1220
1221         for(mp=runtime_allm; mp; mp=mp->alllink)
1222                 runtime_MCache_ReleaseAll(mp->mcache);
1223 }
1224
1225 static void
1226 cachestats(GCStats *stats)
1227 {
1228         M *mp;
1229         MCache *c;
1230         uint32 i;
1231         uint64 stacks_inuse;
1232         uint64 stacks_sys;
1233         uint64 *src, *dst;
1234
1235         if(stats)
1236                 runtime_memclr((byte*)stats, sizeof(*stats));
1237         stacks_inuse = 0;
1238         stacks_sys = runtime_stacks_sys;
1239         for(mp=runtime_allm; mp; mp=mp->alllink) {
1240                 c = mp->mcache;
1241                 runtime_purgecachedstats(c);
1242                 // stacks_inuse += mp->stackalloc->inuse;
1243                 // stacks_sys += mp->stackalloc->sys;
1244                 if(stats) {
1245                         src = (uint64*)&mp->gcstats;
1246                         dst = (uint64*)stats;
1247                         for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
1248                                 dst[i] += src[i];
1249                         runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
1250                 }
1251                 for(i=0; i<nelem(c->local_by_size); i++) {
1252                         mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
1253                         c->local_by_size[i].nmalloc = 0;
1254                         mstats.by_size[i].nfree += c->local_by_size[i].nfree;
1255                         c->local_by_size[i].nfree = 0;
1256                 }
1257         }
1258         mstats.stacks_inuse = stacks_inuse;
1259         mstats.stacks_sys = stacks_sys;
1260 }
1261
1262 // Structure of arguments passed to function gc().
1263 // This allows the arguments to be passed via reflect_call.
1264 struct gc_args
1265 {
1266         int32 force;
1267 };
1268
1269 static void gc(struct gc_args *args);
1270
1271 void
1272 runtime_gc(int32 force)
1273 {
1274         M *m;
1275         const byte *p;
1276         struct gc_args a, *ap;
1277
1278         // The atomic operations are not atomic if the uint64s
1279         // are not aligned on uint64 boundaries. This has been
1280         // a problem in the past.
1281         if((((uintptr)&work.empty) & 7) != 0)
1282                 runtime_throw("runtime: gc work buffer is misaligned");
1283
1284         // Make sure all registers are saved on stack so that
1285         // scanstack sees them.
1286         __builtin_unwind_init();
1287
1288         // The gc is turned off (via enablegc) until
1289         // the bootstrap has completed.
1290         // Also, malloc gets called in the guts
1291         // of a number of libraries that might be
1292         // holding locks.  To avoid priority inversion
1293         // problems, don't bother trying to run gc
1294         // while holding a lock.  The next mallocgc
1295         // without a lock will do the gc instead.
1296         m = runtime_m();
1297         if(!mstats.enablegc || m->locks > 0 || runtime_panicking)
1298                 return;
1299
1300         if(gcpercent == -2) {   // first time through
1301                 p = runtime_getenv("GOGC");
1302                 if(p == nil || p[0] == '\0')
1303                         gcpercent = 100;
1304                 else if(runtime_strcmp((const char*)p, "off") == 0)
1305                         gcpercent = -1;
1306                 else
1307                         gcpercent = runtime_atoi(p);
1308
1309                 p = runtime_getenv("GOGCTRACE");
1310                 if(p != nil)
1311                         gctrace = runtime_atoi(p);
1312         }
1313         if(gcpercent < 0)
1314                 return;
1315
1316         // The gc toolchain's runtime runs gc on a bigger stack to eliminate
1317         // a potentially large number of calls to runtime_morestack;
1318         // gccgo does not do that, so gc is called directly here.
1319         a.force = force;
1320         ap = &a;
1321         gc(ap);
1322
1323         if(gctrace > 1 && !force) {
1324                 a.force = 1;
1325                 gc(&a);
1326         }
1327 }
1328
1329 static void
1330 gc(struct gc_args *args)
1331 {
1332         M *m;
1333         int64 t0, t1, t2, t3;
1334         uint64 heap0, heap1, obj0, obj1;
1335         GCStats stats;
1336         M *mp;
1337         uint32 i;
1338
1339         runtime_semacquire(&runtime_worldsema);
1340         if(!args->force && mstats.heap_alloc < mstats.next_gc) {
1341                 runtime_semrelease(&runtime_worldsema);
1342                 return;
1343         }
1344
1345         m = runtime_m();
1346
1347         t0 = runtime_nanotime();
1348
1349         m->gcing = 1;
1350         runtime_stoptheworld();
1351
1352         for(mp=runtime_allm; mp; mp=mp->alllink)
1353                 runtime_settype_flush(mp, false);
1354
1355         heap0 = 0;
1356         obj0 = 0;
1357         if(gctrace) {
1358                 cachestats(nil);
1359                 heap0 = mstats.heap_alloc;
1360                 obj0 = mstats.nmalloc - mstats.nfree;
1361         }
1362
1363         m->locks++;     // disable gc during mallocs in parforalloc
1364         if(work.markfor == nil)
1365                 work.markfor = runtime_parforalloc(MaxGcproc);
1366         if(work.sweepfor == nil)
1367                 work.sweepfor = runtime_parforalloc(MaxGcproc);
1368         m->locks--;
1369
1370         work.nwait = 0;
1371         work.ndone = 0;
1372         work.debugmarkdone = 0;
1373         work.nproc = runtime_gcprocs();
1374         addroots();
1375         runtime_parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
1376         runtime_parforsetup(work.sweepfor, work.nproc, runtime_mheap.nspan, nil, true, sweepspan);
1377         if(work.nproc > 1) {
1378                 runtime_noteclear(&work.alldone);
1379                 runtime_helpgc(work.nproc);
1380         }
1381
1382         runtime_parfordo(work.markfor);
1383         scanblock(nil, nil, 0, true);
1384
1385         if(DebugMark) {
1386                 for(i=0; i<work.nroot; i++)
1387                         debug_scanblock(work.roots[i].p, work.roots[i].n);
1388                 runtime_atomicstore(&work.debugmarkdone, 1);
1389         }
1390         t1 = runtime_nanotime();
1391
1392         runtime_parfordo(work.sweepfor);
1393         t2 = runtime_nanotime();
1394
1395         stealcache();
1396         cachestats(&stats);
1397
1398         if(work.nproc > 1)
1399                 runtime_notesleep(&work.alldone);
1400
1401         stats.nprocyield += work.sweepfor->nprocyield;
1402         stats.nosyield += work.sweepfor->nosyield;
1403         stats.nsleep += work.sweepfor->nsleep;
1404
1405         mstats.next_gc = mstats.heap_alloc+(mstats.heap_alloc-runtime_stacks_sys)*gcpercent/100;
1406         m->gcing = 0;
1407
1408         if(finq != nil) {
1409                 m->locks++;     // disable gc during the mallocs in newproc
1410                 // kick off or wake up goroutine to run queued finalizers
1411                 if(fing == nil)
1412                         fing = __go_go(runfinq, nil);
1413                 else if(fingwait) {
1414                         fingwait = 0;
1415                         runtime_ready(fing);
1416                 }
1417                 m->locks--;
1418         }
1419
1420         heap1 = mstats.heap_alloc;
1421         obj1 = mstats.nmalloc - mstats.nfree;
1422
1423         t3 = runtime_nanotime();
1424         mstats.last_gc = t3;
1425         mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t3 - t0;
1426         mstats.pause_total_ns += t3 - t0;
1427         mstats.numgc++;
1428         if(mstats.debuggc)
1429                 runtime_printf("pause %D\n", t3-t0);
1430
1431         if(gctrace) {
1432                 runtime_printf("gc%d(%d): %D+%D+%D ms, %D -> %D MB %D -> %D (%D-%D) objects,"
1433                                 " %D(%D) handoff, %D(%D) steal, %D/%D/%D yields\n",
1434                         mstats.numgc, work.nproc, (t1-t0)/1000000, (t2-t1)/1000000, (t3-t2)/1000000,
1435                         heap0>>20, heap1>>20, obj0, obj1,
1436                         mstats.nmalloc, mstats.nfree,
1437                         stats.nhandoff, stats.nhandoffcnt,
1438                         work.sweepfor->nsteal, work.sweepfor->nstealcnt,
1439                         stats.nprocyield, stats.nosyield, stats.nsleep);
1440         }
1441
1442         runtime_MProf_GC();
1443         runtime_semrelease(&runtime_worldsema);
1444         runtime_starttheworld();
1445
1446         // give the queued finalizers, if any, a chance to run
1447         if(finq != nil)
1448                 runtime_gosched();
1449 }
1450
1451 void runtime_ReadMemStats(MStats *)
1452   __asm__ (GOSYM_PREFIX "runtime.ReadMemStats");
1453
1454 void
1455 runtime_ReadMemStats(MStats *stats)
1456 {
1457         M *m;
1458
1459         // Have to acquire worldsema to stop the world,
1460         // because stoptheworld can only be used by
1461         // one goroutine at a time, and there might be
1462         // a pending garbage collection already calling it.
1463         runtime_semacquire(&runtime_worldsema);
1464         m = runtime_m();
1465         m->gcing = 1;
1466         runtime_stoptheworld();
1467         cachestats(nil);
1468         *stats = mstats;
1469         m->gcing = 0;
1470         runtime_semrelease(&runtime_worldsema);
1471         runtime_starttheworld();
1472 }
1473
1474 static void
1475 runfinq(void* dummy __attribute__ ((unused)))
1476 {
1477         Finalizer *f;
1478         FinBlock *fb, *next;
1479         uint32 i;
1480
1481         for(;;) {
1482                 // There's no need for a lock in this section
1483                 // because it only conflicts with the garbage
1484                 // collector, and the garbage collector only
1485                 // runs when everyone else is stopped, and
1486                 // runfinq only stops at the gosched() or
1487                 // during the calls in the for loop.
1488                 fb = finq;
1489                 finq = nil;
1490                 if(fb == nil) {
1491                         fingwait = 1;
1492                         runtime_park(nil, nil, "finalizer wait");
1493                         continue;
1494                 }
1495                 if(raceenabled)
1496                         runtime_racefingo();
1497                 for(; fb; fb=next) {
1498                         next = fb->next;
1499                         for(i=0; i<(uint32)fb->cnt; i++) {
1500                                 void *params[1];
1501
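                                     // reflect_call invokes the finalizer
                                     // f->fn (its type descriptor is f->ft)
                                     // with one argument, passed by pointing
                                     // params[0] at f->arg.  fn and arg are
                                     // cleared afterwards so the recycled
                                     // FinBlock does not keep the finalized
                                     // object reachable.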
1502                                 f = &fb->fin[i];
1503                                 params[0] = &f->arg;
1504                                 reflect_call(f->ft, (void*)f->fn, 0, 0, params, nil);
1505                                 f->fn = nil;
1506                                 f->arg = nil;
1507                         }
1508                         fb->cnt = 0;
1509                         fb->next = finc;
1510                         finc = fb;
1511                 }
1512                 runtime_gc(1);  // trigger another gc to clean up the finalized objects, if possible
1513         }
1514 }
1515
1516 // mark the block at v of size n as allocated.
1517 // If noptr is true, mark it as having no pointers.
1518 void
1519 runtime_markallocated(void *v, uintptr n, bool noptr)
1520 {
1521         uintptr *b, obits, bits, off, shift;
1522
1523         if(0)
1524                 runtime_printf("markallocated %p+%p\n", v, n);
1525
1526         if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1527                 runtime_throw("markallocated: bad pointer");
1528
1529         off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
1530         b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1531         shift = off % wordsPerBitmapWord;
1532
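             // Illustrative example, assuming a 64-bit system so that
             // wordsPerBitmapWord == 16: for the heap word at word offset
             // off == 35, off/16 == 2, so b is the third bitmap word below
             // arena_start and shift == 35%16 == 3.
             //
             // The loop below is a read-modify-CAS retry: obits is re-read
             // on every iteration, so a concurrent update to another heap
             // word's bits within the same bitmap word is never lost.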
1533         for(;;) {
1534                 obits = *b;
1535                 bits = (obits & ~(bitMask<<shift)) | (bitAllocated<<shift);
1536                 if(noptr)
1537                         bits |= bitNoPointers<<shift;
1538                 if(runtime_singleproc) {
1539                         *b = bits;
1540                         break;
1541                 } else {
1542                         // more than one goroutine is potentially running: use atomic op
1543                         if(runtime_casp((void**)b, (void*)obits, (void*)bits))
1544                                 break;
1545                 }
1546         }
1547 }
1548
1549 // mark the block at v of size n as freed.
1550 void
1551 runtime_markfreed(void *v, uintptr n)
1552 {
1553         uintptr *b, obits, bits, off, shift;
1554
1555         if(0)
1556                 runtime_printf("markfreed %p+%p\n", v, n);
1557
1558         if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1559                 runtime_throw("markfreed: bad pointer");
1560
1561         off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
1562         b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1563         shift = off % wordsPerBitmapWord;
1564
1565         for(;;) {
1566                 obits = *b;
1567                 bits = (obits & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
1568                 if(runtime_singleproc) {
1569                         *b = bits;
1570                         break;
1571                 } else {
1572                         // more than one goroutine is potentially running: use atomic op
1573                         if(runtime_casp((void**)b, (void*)obits, (void*)bits))
1574                                 break;
1575                 }
1576         }
1577 }
1578
1579 // check that the block at v of size n is marked freed.
1580 void
1581 runtime_checkfreed(void *v, uintptr n)
1582 {
1583         uintptr *b, bits, off, shift;
1584
1585         if(!runtime_checking)
1586                 return;
1587
1588         if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1589                 return; // not allocated, so okay
1590
1591         off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;  // word offset
1592         b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1593         shift = off % wordsPerBitmapWord;
1594
1595         bits = *b>>shift;
1596         if((bits & bitAllocated) != 0) {
1597                 runtime_printf("checkfreed %p+%p: off=%p have=%p\n",
1598                         v, n, off, bits & bitMask);
1599                 runtime_throw("checkfreed: not freed");
1600         }
1601 }
1602
1603 // mark the span of memory at v as having n blocks of the given size.
1604 // if leftover is true, there is left over space at the end of the span.
1605 void
1606 runtime_markspan(void *v, uintptr size, uintptr n, bool leftover)
1607 {
1608         uintptr *b, off, shift;
1609         byte *p;
1610
1611         if((byte*)v+size*n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1612                 runtime_throw("markspan: bad pointer");
1613
1614         p = v;
1615         if(leftover)    // mark a boundary just past end of last block too
1616                 n++;
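             // For example, a span at v holding n == 3 blocks of size == 48
             // bytes gets boundary bits at v, v+48 and v+96 (and at v+144 as
             // well when leftover is set).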
1617         for(; n-- > 0; p += size) {
1618                 // Okay to use non-atomic ops here, because we control
1619                 // the entire span, and each bitmap word has bits for only
1620                 // one span, so no other goroutines are changing these
1621                 // bitmap words.
1622                 off = (uintptr*)p - (uintptr*)runtime_mheap.arena_start;  // word offset
1623                 b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1624                 shift = off % wordsPerBitmapWord;
1625                 *b = (*b & ~(bitMask<<shift)) | (bitBlockBoundary<<shift);
1626         }
1627 }
1628
1629 // unmark the span of memory at v of length n bytes.
1630 void
1631 runtime_unmarkspan(void *v, uintptr n)
1632 {
1633         uintptr *p, *b, off;
1634
1635         if((byte*)v+n > (byte*)runtime_mheap.arena_used || (byte*)v < runtime_mheap.arena_start)
1636                 runtime_throw("unmarkspan: bad pointer");
1637
1638         p = v;
1639         off = p - (uintptr*)runtime_mheap.arena_start;  // word offset
1640         if(off % wordsPerBitmapWord != 0)
1641                 runtime_throw("unmarkspan: unaligned pointer");
1642         b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1643         n /= PtrSize;
1644         if(n%wordsPerBitmapWord != 0)
1645                 runtime_throw("unmarkspan: unaligned length");
1646         // Okay to use non-atomic ops here, because we control
1647         // the entire span, and each bitmap word has bits for only
1648         // one span, so no other goroutines are changing these
1649         // bitmap words.
1650         n /= wordsPerBitmapWord;
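             // n is now a count of bitmap words.  b points at the bitmap word
             // for the first word of the span; because the bitmap grows
             // downward in memory from arena_start, the words are cleared at
             // decreasing addresses.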
1651         while(n-- > 0)
1652                 *b-- = 0;
1653 }
1654
1655 bool
1656 runtime_blockspecial(void *v)
1657 {
1658         uintptr *b, off, shift;
1659
1660         if(DebugMark)
1661                 return true;
1662
1663         off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
1664         b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1665         shift = off % wordsPerBitmapWord;
1666
1667         return (*b & (bitSpecial<<shift)) != 0;
1668 }
1669
1670 void
1671 runtime_setblockspecial(void *v, bool s)
1672 {
1673         uintptr *b, off, shift, bits, obits;
1674
1675         if(DebugMark)
1676                 return;
1677
1678         off = (uintptr*)v - (uintptr*)runtime_mheap.arena_start;
1679         b = (uintptr*)runtime_mheap.arena_start - off/wordsPerBitmapWord - 1;
1680         shift = off % wordsPerBitmapWord;
1681
1682         for(;;) {
1683                 obits = *b;
1684                 if(s)
1685                         bits = obits | (bitSpecial<<shift);
1686                 else
1687                         bits = obits & ~(bitSpecial<<shift);
1688                 if(runtime_singleproc) {
1689                         *b = bits;
1690                         break;
1691                 } else {
1692                         // more than one goroutine is potentially running: use atomic op
1693                         if(runtime_casp((void**)b, (void*)obits, (void*)bits))
1694                                 break;
1695                 }
1696         }
1697 }
1698
1699 void
1700 runtime_MHeap_MapBits(MHeap *h)
1701 {
1702         size_t page_size;
1703
1704         // Caller has added extra mappings to the arena.
1705         // Add extra mappings of bitmap words as needed.
1706         // We allocate extra bitmap pieces in chunks of bitmapChunk.
1707         enum {
1708                 bitmapChunk = 8192
1709         };
1710         uintptr n;
1711
1712         n = (h->arena_used - h->arena_start) / wordsPerBitmapWord;
1713         n = (n+bitmapChunk-1) & ~(bitmapChunk-1);
1714         if(h->bitmap_mapped >= n)
1715                 return;
1716
1717         page_size = getpagesize();
1718         n = (n+page_size-1) & ~(page_size-1);
1719
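             // After this call the mapped bitmap covers [arena_start-n,
             // arena_start); only the tail [arena_start-n,
             // arena_start-bitmap_mapped) is newly mapped.  As a rough
             // illustration, assuming a 64-bit system, 64 MB of used arena
             // needs 64MB/16 == 4 MB of bitmap before the chunk and page
             // rounding above.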
1720         runtime_SysMap(h->arena_start - n, n - h->bitmap_mapped);
1721         h->bitmap_mapped = n;
1722 }