// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Page heap.
//
// See malloc.h for overview.
//
// When a MSpan is in the heap free list, state == MSpanFree
// and heapmap(s->start) == span, heapmap(s->start+s->npages-1) == span.
//
// When a MSpan is allocated, state == MSpanInUse
// and heapmap(i) == span for all s->start <= i < s->start+s->npages.
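//
// For example, a free 3-page span s starting at page p has
// heapmap(p) == s and heapmap(p+2) == s, while heapmap(p+1) may
// still hold a stale pointer; once s is allocated, all three
// entries point at s.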

#include "runtime.h"
#include "arch.h"
#include "malloc.h"

static MSpan *MHeap_AllocLocked(MHeap*, uintptr, int32);
static bool MHeap_Grow(MHeap*, uintptr);
static void MHeap_FreeLocked(MHeap*, MSpan*);
static MSpan *MHeap_AllocLarge(MHeap*, uintptr);
static MSpan *BestFit(MSpan*, uintptr, MSpan*);
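
// RecordSpan is registered with the span FixAlloc (see MHeap_Init
// below): it is called for each newly created MSpan and appends it
// to h->allspans, growing that array as needed, so the runtime can
// enumerate every span it has ever created.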
static void
RecordSpan(void *vh, byte *p)
{
	MHeap *h;
	MSpan *s;
	MSpan **all;
	uint32 cap;

	h = vh;
	s = (MSpan*)p;
	if(h->nspan >= h->nspancap) {
		cap = 64*1024/sizeof(all[0]);
		if(cap < h->nspancap*3/2)
			cap = h->nspancap*3/2;
		all = (MSpan**)runtime_SysAlloc(cap*sizeof(all[0]));
		if(all == nil)
			runtime_throw("runtime: cannot allocate memory");
		if(h->allspans) {
			runtime_memmove(all, h->allspans, h->nspancap*sizeof(all[0]));
			runtime_SysFree(h->allspans, h->nspancap*sizeof(all[0]));
		}
		h->allspans = all;
		h->nspancap = cap;
	}
	h->allspans[h->nspan++] = s;
}

// Initialize the heap; fetch memory using alloc.
void
runtime_MHeap_Init(MHeap *h, void *(*alloc)(uintptr))
{
	uint32 i;

	runtime_FixAlloc_Init(&h->spanalloc, sizeof(MSpan), alloc, RecordSpan, h);
	runtime_FixAlloc_Init(&h->cachealloc, sizeof(MCache), alloc, nil, nil);
	// h->mapcache needs no init
	for(i=0; i<nelem(h->free); i++)
		runtime_MSpanList_Init(&h->free[i]);
	runtime_MSpanList_Init(&h->large);
	for(i=0; i<nelem(h->central); i++)
		runtime_MCentral_Init(&h->central[i], i);
}

// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
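// If acct != 0 the allocation is charged to heap_alloc and
// heap_objects. If zeroed != 0 and the first word of the span
// indicates it may hold stale data, the span is cleared before
// it is returned.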
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct, int32 zeroed)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	if(s != nil && *(uintptr*)(s->start<<PageShift) != 0 && zeroed)
		runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
	return s;
}
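
// Allocate a span of npage pages for the given size class,
// trimming any excess back into the heap. The heap must be locked.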
static MSpan*
MHeap_AllocLocked(MHeap *h, uintptr npage, int32 sizeclass)
{
	uintptr n;
	MSpan *s, *t;
	PageID p;

	// Try in fixed-size lists up to max.
	for(n=npage; n < nelem(h->free); n++) {
		if(!runtime_MSpanList_IsEmpty(&h->free[n])) {
			s = h->free[n].next;
			goto HaveSpan;
		}
	}

	// Best fit in list of large spans.
	if((s = MHeap_AllocLarge(h, npage)) == nil) {
		if(!MHeap_Grow(h, npage))
			return nil;
		if((s = MHeap_AllocLarge(h, npage)) == nil)
			return nil;
	}

HaveSpan:
	// Mark span in use.
	if(s->state != MSpanFree)
		runtime_throw("MHeap_AllocLocked - MSpan not free");
	if(s->npages < npage)
		runtime_throw("MHeap_AllocLocked - bad npages");
	runtime_MSpanList_Remove(s);
	s->state = MSpanInUse;
	mstats.heap_idle -= s->npages<<PageShift;
	mstats.heap_released -= s->npreleased<<PageShift;
	if(s->npreleased > 0) {
		// We have called runtime_SysUnused with these pages, and on
		// Unix systems it called madvise. At this point at least
		// some BSD-based kernels will return these pages either as
		// zeros or with the old data. For our caller, the first word
		// in the page indicates whether the span contains zeros or
		// not (this word was set when the span was freed by
		// MCentral_Free or runtime_MCentral_FreeSpan). If the first
		// page in the span is returned as zeros, and some subsequent
		// page is returned with the old data, then we will be
		// returning a span that is assumed to be all zeros, but the
		// actual data will not be all zeros. Avoid that problem by
		// explicitly marking the span as not being zeroed, just in
		// case. The beadbead constant we use here means nothing; it
		// is just a unique constant not seen elsewhere in the
		// runtime, as a clue in case it turns up unexpectedly in
		// memory or in a stack trace.
		*(uintptr*)(s->start<<PageShift) = (uintptr)0xbeadbeadbeadbeadULL;
	}
	s->npreleased = 0;

	if(s->npages > npage) {
		// Trim extra and put it back in the heap.
		t = runtime_FixAlloc_Alloc(&h->spanalloc);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
		runtime_MSpan_Init(t, s->start + npage, s->npages - npage);
		s->npages = npage;
		p = t->start;
		if(sizeof(void*) == 8)
			p -= ((uintptr)h->arena_start>>PageShift);
		if(p > 0)
			h->map[p-1] = s;
		h->map[p] = t;
		h->map[p+t->npages-1] = t;
		*(uintptr*)(t->start<<PageShift) = *(uintptr*)(s->start<<PageShift);	// copy "needs zeroing" mark
		t->state = MSpanInUse;
		MHeap_FreeLocked(h, t);
		t->unusedsince = s->unusedsince; // preserve age
	}
	s->unusedsince = 0;

	// Record span info, because gc needs to be
	// able to map interior pointer to containing span.
	s->sizeclass = sizeclass;
	s->elemsize = (sizeclass==0 ? s->npages<<PageShift : (uintptr)runtime_class_to_size[sizeclass]);
	s->types.compression = MTypes_Empty;
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	for(n=0; n<npage; n++)
		h->map[p+n] = s;
	return s;
}

// Allocate a span of exactly npage pages from the list of large spans.
static MSpan*
MHeap_AllocLarge(MHeap *h, uintptr npage)
{
	return BestFit(&h->large, npage, nil);
}

// Search list for smallest span with >= npage pages.
// If there are multiple smallest spans, take the one
// with the earliest starting address.
static MSpan*
BestFit(MSpan *list, uintptr npage, MSpan *best)
{
	MSpan *s;

	for(s=list->next; s != list; s=s->next) {
		if(s->npages < npage)
			continue;
		if(best == nil
		|| s->npages < best->npages
		|| (s->npages == best->npages && s->start < best->start))
			best = s;
	}
	return best;
}

// Try to add at least npage pages of memory to the heap,
// returning whether it worked.
static bool
MHeap_Grow(MHeap *h, uintptr npage)
{
	uintptr ask;
	void *v;
	MSpan *s;
	PageID p;

	// Ask for a big chunk, to reduce the number of mappings
	// the operating system needs to track; also amortizes
	// the overhead of an operating system mapping.
	// Allocate a multiple of 64kB (16 pages).
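	// For example, with 4kB pages a 1-page request is rounded up to
	// 16 pages (64kB), and a 17-page request to 32 pages (128kB):
	// (17+15)&~15 == 32.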
	npage = (npage+15)&~15;
	ask = npage<<PageShift;
	if(ask < HeapAllocChunk)
		ask = HeapAllocChunk;

	v = runtime_MHeap_SysAlloc(h, ask);
	if(v == nil) {
		if(ask > (npage<<PageShift)) {
			// Retry with the smaller, exact request.
			ask = npage<<PageShift;
			v = runtime_MHeap_SysAlloc(h, ask);
		}
		if(v == nil) {
			runtime_printf("runtime: out of memory: cannot allocate %D-byte block (%D in use)\n", (uint64)ask, mstats.heap_sys);
			return false;
		}
	}
	mstats.heap_sys += ask;

	// Create a fake "in use" span and free it, so that the
	// right coalescing happens.
	s = runtime_FixAlloc_Alloc(&h->spanalloc);
	mstats.mspan_inuse = h->spanalloc.inuse;
	mstats.mspan_sys = h->spanalloc.sys;
	runtime_MSpan_Init(s, (uintptr)v>>PageShift, ask>>PageShift);
	p = s->start;
	if(sizeof(void*) == 8)
		p -= ((uintptr)h->arena_start>>PageShift);
	h->map[p] = s;
	h->map[p + s->npages - 1] = s;
	s->state = MSpanInUse;
	MHeap_FreeLocked(h, s);
	return true;
}

// Look up the span at the given address.
// Address is guaranteed to be in map
// and is guaranteed to be start or end of span.
MSpan*
runtime_MHeap_Lookup(MHeap *h, void *v)
{
	uintptr p;

	p = (uintptr)v;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start;
	return h->map[p >> PageShift];
}

// Look up the span at the given address.
// Address is *not* guaranteed to be in map
// and may be anywhere in the span.
// Map entries for the middle of a span are only
// valid for allocated spans. Free spans may have
// other garbage in their middles, so we have to
// check for that.
MSpan*
runtime_MHeap_LookupMaybe(MHeap *h, void *v)
{
	MSpan *s;
	PageID p, q;

	if((byte*)v < h->arena_start || (byte*)v >= h->arena_used)
		return nil;
	p = (uintptr)v>>PageShift;
	q = p;
	if(sizeof(void*) == 8)
		q -= (uintptr)h->arena_start >> PageShift;
	s = h->map[q];
	if(s == nil || p < s->start || p - s->start >= s->npages)
		return nil;
	if(s->state != MSpanInUse)
		return nil;
	return s;
}

// Free the span back into the heap.
void
runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct)
{
	runtime_lock(h);
	runtime_purgecachedstats(runtime_m()->mcache);
	mstats.heap_inuse -= s->npages<<PageShift;
	if(acct) {
		mstats.heap_alloc -= s->npages<<PageShift;
		mstats.heap_objects--;
	}
	MHeap_FreeLocked(h, s);
	runtime_unlock(h);
}

static void
MHeap_FreeLocked(MHeap *h, MSpan *s)
{
	uintptr *sp, *tp;
	MSpan *t;
	PageID p;

	if(s->types.sysalloc)
		runtime_settype_sysfree(s);
	s->types.compression = MTypes_Empty;

	if(s->state != MSpanInUse || s->ref != 0) {
		runtime_printf("MHeap_FreeLocked - span %p ptr %p state %d ref %d\n", s, s->start<<PageShift, s->state, s->ref);
		runtime_throw("MHeap_FreeLocked - invalid free");
	}
	mstats.heap_idle += s->npages<<PageShift;
	s->state = MSpanFree;
	runtime_MSpanList_Remove(s);
	sp = (uintptr*)(s->start<<PageShift);
	// Stamp newly unused spans. The scavenger will use that
	// info to potentially give back some pages to the OS.
	s->unusedsince = runtime_nanotime();
	s->npreleased = 0;

	// Coalesce with earlier, later spans.
	p = s->start;
	if(sizeof(void*) == 8)
		p -= (uintptr)h->arena_start >> PageShift;
	if(p > 0 && (t = h->map[p-1]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*tp |= *sp;	// propagate "needs zeroing" mark
		s->start = t->start;
		s->npages += t->npages;
		s->npreleased = t->npreleased; // absorb released pages
		p -= t->npages;
		h->map[p] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}
	if(p+s->npages < nelem(h->map) && (t = h->map[p+s->npages]) != nil && t->state != MSpanInUse) {
		tp = (uintptr*)(t->start<<PageShift);
		*sp |= *tp;	// propagate "needs zeroing" mark
		s->npages += t->npages;
		s->npreleased += t->npreleased;
		h->map[p + s->npages - 1] = s;
		runtime_MSpanList_Remove(t);
		t->state = MSpanDead;
		runtime_FixAlloc_Free(&h->spanalloc, t);
		mstats.mspan_inuse = h->spanalloc.inuse;
		mstats.mspan_sys = h->spanalloc.sys;
	}

	// Insert s into appropriate list.
	if(s->npages < nelem(h->free))
		runtime_MSpanList_Insert(&h->free[s->npages], s);
	else
		runtime_MSpanList_Insert(&h->large, s);
}
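
// forcegchelper runs a forced garbage collection on its own
// goroutine and then wakes the scavenger, which sleeps on the note.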
static void
forcegchelper(void *vnote)
{
	Note *note = (Note*)vnote;

	runtime_gc(1);
	runtime_notewakeup(note);
}
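
// scavengelist releases the unused pages of every span on list that
// has been idle for more than limit nanoseconds, returning the total
// number of bytes released.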
static uintptr
scavengelist(MSpan *list, uint64 now, uint64 limit)
{
	uintptr released, sumreleased;
	MSpan *s;

	if(runtime_MSpanList_IsEmpty(list))
		return 0;

	sumreleased = 0;
	for(s=list->next; s != list; s=s->next) {
		if((now - s->unusedsince) > limit) {
			released = (s->npages - s->npreleased) << PageShift;
			mstats.heap_released += released;
			sumreleased += released;
			s->npreleased = s->npages;
			runtime_SysUnused((void*)(s->start << PageShift), s->npages << PageShift);
		}
	}
	return sumreleased;
}
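
// scavenge does the same for every free list in the heap, including
// the large-span list. The heap must be locked.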
static uintptr
scavenge(uint64 now, uint64 limit)
{
	uint32 i;
	uintptr sumreleased;
	MHeap *h;

	h = runtime_mheap;
	sumreleased = 0;
	for(i=0; i < nelem(h->free); i++)
		sumreleased += scavengelist(&h->free[i], now, limit);
	sumreleased += scavengelist(&h->large, now, limit);
	return sumreleased;
}

// Release (part of) unused memory to OS.
// Goroutine created at startup.
// Loop forever.
void
runtime_MHeap_Scavenger(void* dummy)
{
	G *g;
	MHeap *h;
	uint64 tick, now, forcegc, limit;
	uint32 k;
	uintptr sumreleased;
	const byte *env;
	bool trace;
	Note note, *notep;

	USED(dummy);

	g = runtime_g();
	g->isbackground = true;

	// If we go two minutes without a garbage collection, force one to run.
	forcegc = 2*60*1e9;
	// If a span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	limit = 5*60*1e9;
	// Make wake-up period small enough for the sampling to be correct.
	if(forcegc < limit)
		tick = forcegc/2;
	else
		tick = limit/2;
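	// (Waking every half of the shorter period means neither the GC
	// deadline nor the span age limit is overshot by more than tick.)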

	trace = false;
	env = runtime_getenv("GOGCTRACE");
	if(env != nil)
		trace = runtime_atoi(env) > 0;

	h = runtime_mheap;
	for(k=0;; k++) {
		runtime_noteclear(&note);
		runtime_entersyscallblock();
		runtime_notetsleep(&note, tick);
		runtime_exitsyscall();

		runtime_lock(h);
		now = runtime_nanotime();
		if(now - mstats.last_gc > forcegc) {
			runtime_unlock(h);
			// The scavenger cannot block other goroutines;
			// otherwise the deadlock detector can fire spuriously.
			// GC blocks other goroutines via the runtime_worldsema.
			runtime_noteclear(&note);
			notep = &note;
			__go_go(forcegchelper, (void*)notep);
			runtime_entersyscallblock();
			runtime_notesleep(&note);
			runtime_exitsyscall();
			if(trace)
				runtime_printf("scvg%d: GC forced\n", k);
			runtime_lock(h);
			now = runtime_nanotime();
		}
		sumreleased = scavenge(now, limit);
		runtime_unlock(h);

		if(trace) {
			if(sumreleased > 0)
				runtime_printf("scvg%d: %p MB released\n", k, sumreleased>>20);
			runtime_printf("scvg%d: inuse: %D, idle: %D, sys: %D, released: %D, consumed: %D (MB)\n",
				k, mstats.heap_inuse>>20, mstats.heap_idle>>20, mstats.heap_sys>>20,
				mstats.heap_released>>20, (mstats.heap_sys - mstats.heap_released)>>20);
		}
	}
}
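
// Implementation of runtime/debug.FreeOSMemory: force a garbage
// collection, then release every unused span immediately by running
// the scavenger with a zero age limit.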
void runtime_debug_freeOSMemory(void) __asm__("runtime_debug.freeOSMemory");

void
runtime_debug_freeOSMemory(void)
{
	runtime_gc(1);
	runtime_lock(runtime_mheap);
	scavenge(~(uintptr)0, 0);
	runtime_unlock(runtime_mheap);
}

// Initialize a new span with the given start and npages.
void
runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages)
{
	span->next = nil;
	span->prev = nil;
	span->start = start;
	span->npages = npages;
	span->freelist = nil;
	span->ref = 0;
	span->sizeclass = 0;
	span->elemsize = 0;
	span->state = 0;
	span->unusedsince = 0;
	span->npreleased = 0;
	span->types.compression = MTypes_Empty;
}

// Initialize an empty doubly-linked list.
void
runtime_MSpanList_Init(MSpan *list)
{
	list->state = MSpanListHead;
	list->next = list;
	list->prev = list;
}
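
// Remove span from whatever list it is on; a no-op if the span
// is not on any list.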
void
runtime_MSpanList_Remove(MSpan *span)
{
	if(span->prev == nil && span->next == nil)
		return;
	span->prev->next = span->next;
	span->next->prev = span->prev;
	span->prev = nil;
	span->next = nil;
}

bool
runtime_MSpanList_IsEmpty(MSpan *list)
{
	return list->next == list;
}
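
// Insert span at the front of list. The span must not already be on
// a list; MSpan_Init leaves next and prev nil, so a freshly
// initialized span can be inserted directly.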
void
runtime_MSpanList_Insert(MSpan *list, MSpan *span)
{
	if(span->next != nil || span->prev != nil) {
		runtime_printf("failed MSpanList_Insert %p %p %p\n", span, span->next, span->prev);
		runtime_throw("MSpanList_Insert");
	}
	span->next = list->next;
	span->prev = list;
	span->next->prev = span;
	span->prev->next = span;
}