/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"
#ifdef GC_USE_ENTIRE_HEAP
  int GC_use_entire_heap = TRUE;
#else
  int GC_use_entire_heap = FALSE;
#endif
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list.   */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a      */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single    */
        /* bucket.                                                      */

# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION \
                     + UNIQUE_THRESHOLD)
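        /* E.g., with the default values above, N_HBLK_FLS is           */
        /* (256 - 32) / 8 + 32 = 60, i.e. there are 61 free lists       */
        /* (indices 0 through N_HBLK_FLS inclusive).                    */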
#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
                                /* List of completely empty heap blocks */
                                /* Linked through hb_next field of      */
                                /* header structure associated with     */
                                /* block.  Remains externally visible   */
                                /* as used by GNU GCJ currently.        */

#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.  Remains visible to GCJ.  */
/* Return the largest n such that the number of free bytes on lists     */
/* n .. N_HBLK_FLS is greater than or equal to GC_max_large_allocd_bytes */
/* minus GC_large_allocd_bytes.  If there is no such n, return 0.       */
GC_INLINE int GC_enough_large_bytes_left(void)
{
    int n;
    word bytes = GC_large_allocd_bytes;

    GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
    for (n = N_HBLK_FLS; n >= 0; --n) {
        bytes += GC_free_bytes[n];
        if (bytes >= GC_max_large_allocd_bytes) return n;
    }
    return 0;
}
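/* E.g. (illustrative): if GC_max_large_allocd_bytes exceeds            */
/* GC_large_allocd_bytes by 3*HBLKSIZE and only list 5 holds free       */
/* bytes (say 4*HBLKSIZE), the loop returns 5, since lists              */
/* 5 .. N_HBLK_FLS already cover the deficit.                           */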
/* Map a number of blocks to the appropriate large block free list index. */
STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                                        + UNIQUE_THRESHOLD;
}
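/* E.g., 10 blocks map to index 10 (sizes up to UNIQUE_THRESHOLD get    */
/* exact lists), 100 blocks map to (100-32)/8 + 32 = 40, and anything   */
/* of HUGE_THRESHOLD (256) blocks or more shares list N_HBLK_FLS.       */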
# define PHDR(hhdr) HDR((hhdr) -> hb_prev)
# define NHDR(hhdr) HDR((hhdr) -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else
#   define IS_MAPPED(hhdr) TRUE
# endif /* !USE_MUNMAP */
#if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
  /* Should return the same value as GC_large_free_bytes.       */
  GC_INNER word GC_compute_large_free_bytes(void)
  {
    word total_free = 0;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h;
      hdr * hhdr;

      for (h = GC_hblkfreelist[i]; h != 0; h = hhdr->hb_next) {
        hhdr = HDR(h);
        total_free += hhdr->hb_sz;
      }
    }
    return total_free;
  }
#endif /* !NO_DEBUGGING || GC_ASSERTIONS */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist(void)
{
    unsigned i;
    word total;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h = GC_hblkfreelist[i];

      if (0 != h) GC_printf("Free list %u (total size %lu):\n",
                            i, (unsigned long)GC_free_bytes[i]);
      while (h /* != NULL */) { /* CPPCHECK */
        hdr * hhdr = HDR(h);

        GC_printf("\t%p size %lu %s black listed\n",
                  (void *)h, (unsigned long) hhdr -> hb_sz,
                  GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
                  GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
                                                              "not");
        h = hhdr -> hb_next;
      }
    }
    GC_printf("GC_large_free_bytes: %lu\n",
              (unsigned long)GC_large_free_bytes);

    if ((total = GC_compute_large_free_bytes()) != GC_large_free_bytes)
      GC_err_printf("GC_large_free_bytes INCONSISTENT!! Should be: %lu\n",
                    (unsigned long)total);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                 */
static int free_list_index_of(hdr *wanted)
{
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h;
      hdr * hhdr;

      for (h = GC_hblkfreelist[i]; h != 0; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
      }
    }
    return -1;
}
GC_API void GC_CALL GC_dump_regions(void)
{
    unsigned i;

    for (i = 0; i < GC_n_heap_sects; ++i) {
      ptr_t start = GC_heap_sects[i].hs_start;
      size_t bytes = GC_heap_sects[i].hs_bytes;
      ptr_t end = start + bytes;
      ptr_t p;

      /* Merge in contiguous sections. */
      while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
        ++i;
        end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
      }
      GC_printf("***Section from %p to %p\n", (void *)start, (void *)end);
      for (p = start; (word)p < (word)end; ) {
        hdr *hhdr = HDR(p);

        if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
          GC_printf("\t%p Missing header!!(%p)\n",
                    (void *)p, (void *)hhdr);
          p += HBLKSIZE;
          continue;
        }
        if (HBLK_IS_FREE(hhdr)) {
          int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
          int actual_index;

          GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n",
                    (void *)p, (unsigned long)(hhdr -> hb_sz),
                    IS_MAPPED(hhdr) ? "" : " (unmapped)");
          actual_index = free_list_index_of(hhdr);
          if (-1 == actual_index) {
            GC_printf("\t\tBlock not on free list %d!!\n",
                      correct_index);
          } else if (correct_index != actual_index) {
            GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                      actual_index, correct_index);
          }
          p += hhdr -> hb_sz;
        } else {
          GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n",
                    (void *)p, (unsigned long)(hhdr -> hb_sz));
          p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
        }
      }
    }
}

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and         */
/* kind of objects.                                                     */
/* Return FALSE on failure.                                             */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;

#   ifdef MARK_BIT_PER_GRANULE
      if (byte_sz > MAXOBJBYTES)
        flags |= LARGE_BLOCK;
#   endif
#   ifdef ENABLE_DISCLAIM
      if (GC_obj_kinds[kind].ok_disclaim_proc)
        flags |= HAS_DISCLAIM;
      if (GC_obj_kinds[kind].ok_mark_unconditionally)
        flags |= MARK_UNCONDITIONALLY;
#   endif

    /* Set size, kind and mark proc fields. */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;
#   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                         */
      /* We set it to the smallest value such that                      */
      /* sz * inv_sz >= 2**32.  This may be more precision than         */
      /* necessary.                                                     */
      if (byte_sz > MAXOBJBYTES) {
        hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else /* 32 bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2; /* compute 2**32/byte_sz with 32-bit arithmetic */
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
                /* The product wraps to a value below byte_sz once      */
                /* the true product reaches 2**32.                      */
#       endif
#       ifdef INV_SZ_COMPUTATION_CHECK
          GC_ASSERT(((1ULL << 32) + byte_sz - 1) / byte_sz == inv_sz);
#       endif
        hhdr -> hb_inv_sz = inv_sz;
      }
#   endif /* MARK_BIT_PER_OBJ */
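    /* E.g. (illustrative): for byte_sz = 24, inv_sz becomes            */
    /* ceil(2**32 / 24) = 178956971, since 178956970 * 24 = 4294967280  */
    /* falls just short of 2**32 = 4294967296.                          */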
#   ifdef MARK_BIT_PER_GRANULE
    {
      size_t granules = BYTES_TO_GRANULES(byte_sz);

      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_flags |= LARGE_BLOCK;
        hhdr -> hb_map = 0;
        return FALSE;
      }
      hhdr -> hb_map = GC_obj_map[(hhdr -> hb_flags & LARGE_BLOCK) != 0 ?
                                  0 : granules];
    }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits. */
    GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return TRUE;
}
/* Remove hhdr from the free list (it is assumed to be on the free list */
/* specified by index).                                                 */
STATIC void GC_remove_from_fl_at(hdr *hhdr, int index)
{
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    if (hhdr -> hb_prev == 0) {
      GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
      GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
      hdr *phdr;

      GET_HDR(hhdr -> hb_prev, phdr);
      phdr -> hb_next = hhdr -> hb_next;
    }
    /* We always need index to maintain free counts. */
    GC_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    GC_free_bytes[index] -= hhdr -> hb_sz;
    if (0 != hhdr -> hb_next) {
      hdr * nhdr;

      GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
      GET_HDR(hhdr -> hb_next, nhdr);
      nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
/* Remove hhdr from the appropriate free list (we assume it is on the   */
/* size-appropriate free list).                                         */
GC_INLINE void GC_remove_from_fl(hdr *hhdr)
{
    GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)));
}
/* Return a pointer to the free block ending just before h, if any.     */
STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
      p = FORWARDED_ADDR(p, phdr);
      phdr = HDR(p);
    }
    if (0 != phdr) {
      if (HBLK_IS_FREE(phdr)) {
        return p;
      } else {
        return 0;
      }
    }
    p = GC_prev_block(h - 1);
    if (p /* != NULL */) { /* CPPCHECK */
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}
/* Add hhdr to the appropriate free list.               */
/* We maintain individual free lists sorted by address. */
STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
#   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);

      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
                || (GC_heapsize & SIGNB) != 0);
                /* In the last case, blocks may be too large to merge. */
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
                || (GC_heapsize & SIGNB) != 0);
#   endif

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    GC_free_bytes[index] += hhdr -> hb_sz;
    GC_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (second /* != NULL */) { /* CPPCHECK */
      hdr * second_hdr;

      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}
#ifdef USE_MUNMAP

# ifndef MUNMAP_THRESHOLD
#   define MUNMAP_THRESHOLD 6
# endif

GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;

/* Unmap blocks that haven't been recently touched.  This is the only   */
/* way blocks are ever unmapped.                                        */
GC_INNER void GC_unmap_old(void)
{
    int i;

    if (GC_unmap_threshold == 0)
      return; /* unmapping disabled */

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk * h;
      hdr * hhdr;

      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;

        /* Check that the interval is larger than the threshold (the    */
        /* truncated counter value wrapping is handled correctly).      */
        if ((unsigned short)(GC_gc_no - hhdr->hb_last_reclaimed) >
            (unsigned short)GC_unmap_threshold) {
          GC_unmap((ptr_t)h, (size_t)hhdr->hb_sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
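/* E.g. (illustrative): if GC_gc_no is 0x10005 and hb_last_reclaimed    */
/* is 0xfffe, the unsigned short subtraction above yields 7, so the     */
/* wrap of the truncated 16-bit age does not inflate the interval.      */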
/* Merge all unmapped blocks that are adjacent to other free            */
/* blocks.  This may involve remapping, since all blocks are either     */
/* fully mapped or fully unmapped.                                      */
GC_INNER void GC_merge_unmapped(void)
{
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      struct hblk *h = GC_hblkfreelist[i];

      while (h != 0) {
        struct hblk *next;
        hdr *hhdr, *nexthdr;
        word size, nextsize;

        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible. */
        if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
            && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
            /* no pot. overflow */) {
          /* Note that we usually try to avoid adjacent free blocks     */
          /* that are either both mapped or both unmapped.  But that    */
          /* isn't guaranteed to hold since we remap blocks when we     */
          /* split them, and don't merge at that point.  It may also    */
          /* not hold if the merged block would be too big.             */
          if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
            /* Make both consistent, so that we can merge. */
            if (size > nextsize) {
              GC_remap((ptr_t)next, nextsize);
            } else {
              GC_unmap((ptr_t)h, size);
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
              hhdr -> hb_flags |= WAS_UNMAPPED;
            }
          } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
            if (size > nextsize) {
              GC_unmap((ptr_t)next, nextsize);
              GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
            } else {
              GC_remap((ptr_t)h, size);
              hhdr -> hb_flags &= ~WAS_UNMAPPED;
              hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
            }
          } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
            /* Unmap any gap in the middle. */
            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
          }
          /* If they are both unmapped, we merge, but leave unmapped. */
          GC_remove_from_fl_at(hhdr, i);
          GC_remove_from_fl(nexthdr);
          hhdr -> hb_sz += nexthdr -> hb_sz;
          GC_remove_header(next);
          GC_add_to_fl(h, hhdr);
          /* Start over at beginning of list. */
          h = GC_hblkfreelist[i];
        } else /* not mergeable with successor */ {
          h = hhdr -> hb_next;
        }
      } /* while (h != 0) ... */
    } /* for ... */
}
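/* E.g. (illustrative): a mapped 5-block chunk followed by an unmapped  */
/* 2-block chunk: size > nextsize, so the smaller successor is remapped */
/* and the pair merges into one mapped 7-block free chunk.              */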
#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
                                       size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl_at(hhdr, index);
    if (total_size == bytes) return h;

    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
      /* FIXME: This is likely to be very bad news ... */
      WARN("Header allocation failed: dropping block\n", 0);
      return 0;
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
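/* E.g. (illustrative): carving bytes = 3*HBLKSIZE out of an 8-block    */
/* chunk leaves a 5-block remainder, which goes back onto free list     */
/* index GC_hblk_fl_from_blocks(5) = 5.                                 */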
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * That's not a disaster, since n is about to be allocated by our caller.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
                           hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist. */
    nhdr -> hb_prev = prev;
    nhdr -> hb_next = next;
    nhdr -> hb_sz = total_size - h_size;
    nhdr -> hb_flags = 0;
    if (prev /* != NULL */) { /* CPPCHECK */
      HDR(prev) -> hb_next = n;
    } else {
      GC_hblkfreelist[index] = n;
    }
    if (next /* != NULL */) {
      HDR(next) -> hb_prev = n;
    }
    GC_ASSERT(GC_free_bytes[index] > h_size);
    GC_free_bytes[index] -= h_size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}
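/* E.g. (illustrative): splitting a 6-block chunk h at n = h + 2 leaves */
/* h as a 2-block chunk (moved to free list index 2), while n inherits  */
/* the remaining 4 blocks and h's old position on list "index".         */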
STATIC struct hblk * GC_allochblk_nth(size_t sz /* bytes */, int kind,
                                      unsigned flags, int n, int may_split);
#define AVOID_SPLIT_REMAPPED 2
/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz bytes, searching the nth free list.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
GC_INNER struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    struct hblk *result;
    int may_split;
    int split_limit; /* Highest index of free list whose blocks we      */
                     /* split.                                          */

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS_CHECKED(sz);
    if ((signed_word)(blocks * HBLKSIZE) < 0) {
      return 0;
    }
    start_list = GC_hblk_fl_from_blocks(blocks);
    /* Try for an exact match first. */
    result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
    if (0 != result) return result;

    may_split = TRUE;
    if (GC_use_entire_heap || GC_dont_gc
        || USED_HEAP_SIZE < GC_requested_heapsize
        || GC_incremental || !GC_should_collect()) {
      /* Should use more of the heap, even if it requires splitting. */
      split_limit = N_HBLK_FLS;
    } else if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
      /* If we are deallocating lots of memory from         */
      /* finalizers, fail and collect sooner rather         */
      /* than later.                                        */
      split_limit = 0;
    } else {
      /* If we have enough large blocks left to cover any   */
      /* previous request for large blocks, we go ahead     */
      /* and split.  Assuming a steady state, that should   */
      /* be safe.  It means that we can use the full        */
      /* heap if we allocate only small objects.            */
      split_limit = GC_enough_large_bytes_left();
#     ifdef USE_MUNMAP
        if (split_limit > 0)
          may_split = AVOID_SPLIT_REMAPPED;
#     endif
    }
    if (start_list < UNIQUE_THRESHOLD) {
      /* No reason to try start_list again, since all blocks are exact  */
      /* matches.                                                       */
      ++start_list;
    }
    for (; start_list <= split_limit; ++start_list) {
      result = GC_allochblk_nth(sz, kind, flags, start_list, may_split);
      if (0 != result) break;
    }
    return result;
}
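/* E.g. (illustrative, assuming HBLKSIZE is 4096): a request for        */
/* sz = 10000 bytes needs roughly ceil(10000/4096) = 3 heap blocks      */
/* (modulo rounding slop), so start_list = 3; an exact 3-block fit is   */
/* tried first, then lists 4 and up, subject to split_limit.            */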
STATIC long GC_large_alloc_warn_suppressed = 0;
                        /* Number of warnings suppressed so far. */

/* The same, but with search restricted to nth free list.  Flags is     */
/* IGNORE_OFF_PAGE or zero.  sz is in bytes.  The may_split flag        */
/* indicates whether it is OK to split larger blocks (if set to         */
/* AVOID_SPLIT_REMAPPED then memory remapping followed by splitting     */
/* should be generally avoided).                                        */
STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp */
    signed_word size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS_CHECKED(sz);
                                /* number of bytes in requested objects */
    /* Search for a big enough block in free list. */
    for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
      signed_word size_avail; /* bytes available in this block */

      if (hbp /* != NULL */) {
        /* CPPCHECK */
      } else {
        return NULL;
      }
      GET_HDR(hbp, hhdr); /* set hhdr value */
      size_avail = (signed_word)hhdr->hb_sz;
      if (size_avail < size_needed) continue;
      if (size_avail != size_needed) {
        if (!may_split) continue;
        /* If the next heap block is obviously better, go on.     */
        /* This prevents us from disassembling a single large     */
        /* block to get tiny blocks.                              */
        thishbp = hhdr -> hb_next;
        if (thishbp /* != NULL */) { /* CPPCHECK */
          signed_word next_size;

          GET_HDR(thishbp, thishdr);
          next_size = (signed_word)(thishdr -> hb_sz);
          if (next_size < size_avail
              && next_size >= size_needed
              && !GC_is_black_listed(thishbp, (word)size_needed)) {
            continue;
          }
        }
      }
      if (!IS_UNCOLLECTABLE(kind) && (kind != PTRFREE
          || size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)) {
        struct hblk * lasthbp = hbp;
        ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
        signed_word orig_avail = size_avail;
        signed_word eff_size_needed = (flags & IGNORE_OFF_PAGE) != 0 ?
                                        (signed_word)HBLKSIZE
                                        : size_needed;

        while ((word)lasthbp <= (word)search_end
               && (thishbp = GC_is_black_listed(lasthbp,
                                    (word)eff_size_needed)) != 0) {
          lasthbp = thishbp;
        }
        size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
        thishbp = lasthbp;
        if (size_avail >= size_needed) {
          if (thishbp != hbp) {
#           ifdef USE_MUNMAP
              /* Avoid remapping followed by splitting. */
              if (may_split == AVOID_SPLIT_REMAPPED && !IS_MAPPED(hhdr))
                continue;
#           endif
            thishdr = GC_install_header(thishbp);
            if (0 != thishdr) {
              /* Make sure it's mapped before we mangle it. */
#             ifdef USE_MUNMAP
                if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, (size_t)hhdr->hb_sz);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
                }
#             endif
              /* Split the block at thishbp. */
              GC_split_block(hbp, hhdr, thishbp, thishdr, n);
              /* Advance to thishbp. */
              hbp = thishbp;
              hhdr = thishdr;
              /* We must now allocate thishbp, since it may     */
              /* be on the wrong free list.                     */
            }
          }
        } else if (size_needed > (signed_word)BL_LIMIT
                   && orig_avail - size_needed
                      > (signed_word)BL_LIMIT) {
          /* Punt, since anything else risks unreasonable heap growth. */
          if (++GC_large_alloc_warn_suppressed
              >= GC_large_alloc_warn_interval) {
            WARN("Repeated allocation of very large block "
                 "(appr. size %" WARN_PRIdPTR "):\n"
                 "\tMay lead to memory leak and poor performance\n",
                 size_needed);
            GC_large_alloc_warn_suppressed = 0;
          }
          size_avail = orig_avail;
        } else if (size_avail == 0
                   && size_needed == (signed_word)HBLKSIZE
                   && IS_MAPPED(hhdr)) {
          if (!GC_find_leak) {
            static unsigned count = 0;

            /* The block is completely blacklisted.  We need      */
            /* to drop some such blocks, since otherwise we spend */
            /* all our time traversing them if pointer-free       */
            /* blocks are unpopular.                              */
            /* A dropped block will be reconsidered at next GC.   */
            if ((++count & 3) == 0) {
              /* Allocate and drop the block in small chunks, to  */
              /* maximize the chance that we will recover some    */
              /* later.                                           */
              word total_size = hhdr -> hb_sz;
              struct hblk * limit = hbp + divHBLKSZ(total_size);
              struct hblk * h;
              struct hblk * prev = hhdr -> hb_prev;

              GC_large_free_bytes -= total_size;
              GC_bytes_dropped += total_size;
              GC_remove_from_fl_at(hhdr, n);
              for (h = hbp; (word)h < (word)limit; h++) {
                if (h != hbp) {
                  hhdr = GC_install_header(h);
                }
                if (NULL != hhdr) {
                  (void)setup_header(hhdr, h, HBLKSIZE, PTRFREE, 0);
                  if (GC_debugging_started) {
                    BZERO(h, HBLKSIZE);
                  }
                }
              }
              /* Restore hbp to point at free block. */
              hbp = prev;
              if (0 == hbp) {
                return GC_allochblk_nth(sz, kind, flags, n, may_split);
              }
              hhdr = HDR(hbp);
            }
          }
        }
      }
      if (size_avail >= size_needed) {
#       ifdef USE_MUNMAP
          if (!IS_MAPPED(hhdr)) {
            GC_remap((ptr_t)hbp, (size_t)hhdr->hb_sz);
            hhdr -> hb_flags &= ~WAS_UNMAPPED;
            /* Note: This may leave adjacent, mapped free blocks. */
          }
#       endif
        /* hbp may be on the wrong freelist; the parameter n    */
        /* is used for indexing there.                          */
        hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
        break;
      }
    }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks. */
    if (!GC_install_counts(hbp, (word)size_needed)) return 0;
        /* This leaks memory under very rare conditions. */

    /* Set up the header. */
    if (!setup_header(hhdr, hbp, sz, kind, flags)) {
      GC_remove_counts(hbp, (word)size_needed);
      return 0; /* ditto */
    }

#   ifndef GC_DISABLE_INCREMENTAL
      /* Notify virtual dirty bit implementation that we are about to  */
      /* write.  Ensure that pointer-free objects are not protected    */
      /* if it is avoidable.  This also ensures that newly allocated   */
      /* blocks are treated as dirty.  Necessary since we don't        */
      /* protect free blocks.                                          */
      GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
      GC_remove_protection(hbp, divHBLKSZ(size_needed),
                           (hhdr -> hb_descr == 0) /* pointer-free */);
#   endif

    /* We just successfully allocated a block.  Restart count of       */
    /* consecutive failures.                                           */
    GC_fail_count = 0;

    GC_large_free_bytes -= size_needed;
    GC_ASSERT(IS_MAPPED(hhdr));
    return hbp;
}
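/* E.g. (illustrative): with size_avail = 8*HBLKSIZE and size_needed =  */
/* 3*HBLKSIZE, search_end is hbp + 5*HBLKSIZE; if the black-list scan   */
/* stops at lasthbp = hbp + 2*HBLKSIZE, size_avail drops to 6*HBLKSIZE, */
/* still >= size_needed, so the block is split at that point.           */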
/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
GC_INNER void GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    word size;

    GET_HDR(hbp, hhdr);
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr->hb_sz);
    if ((signed_word)size <= 0)
      ABORT("Deallocating excessively large block.  Too large an allocation?");
      /* Probably possible if we try to allocate more than half the address */
      /* space at once.  If we don't catch it here, strange things happen   */
      /* later.                                                             */
    GC_remove_counts(hbp, size);
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case. */
    if (HBLK_IS_FREE(hhdr)) {
      ABORT_ARG1("Duplicate large block deallocation",
                 " of %p", (void *)hbp);
    }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((ptr_t)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible. */
    if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
        && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
        /* no overflow */) {
      GC_remove_from_fl(nexthdr);
      hhdr -> hb_sz += nexthdr -> hb_sz;
      GC_remove_header(next);
    }

    /* Coalesce with predecessor, if possible. */
    if (prev /* != NULL */) { /* CPPCHECK */
      prevhdr = HDR(prev);
      if (IS_MAPPED(prevhdr)
          && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
        GC_remove_from_fl(prevhdr);
        prevhdr -> hb_sz += hhdr -> hb_sz;
#       ifdef USE_MUNMAP
          prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#       endif
        GC_remove_header(hbp);
        hbp = prev;
        hhdr = prevhdr;
      }
    }
    /* FIXME: It is not clear we really always want to do these merges  */
    /* with USE_MUNMAP, since it updates ages and hence prevents        */
    /* unmapping.                                                       */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
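/* E.g. (illustrative): freeing a 2-block chunk whose successor is a    */
/* free, mapped 3-block chunk and whose predecessor is a free 1-block   */
/* chunk merges all three into a single 6-block chunk, which lands on   */
/* free list index 6.                                                   */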