/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block. Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */
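/*
 * Concretely (see the declarations below): GC_hblkfreelist[i] heads the
 * free list for size class i, where i is computed from a block's size by
 * GC_hblk_fl_from_blocks(). Free blocks are linked to one another through
 * the hb_next and hb_prev fields of their headers.
 */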
# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
		/* largest block we will allocate starting on a black */
		/* listed block. Must be >= HBLKSIZE. */

# define UNIQUE_THRESHOLD 32
	/* Sizes up to this many HBLKs each have their own free list */
# define HUGE_THRESHOLD 256
	/* Sizes of at least this many heap blocks are mapped to a */
	/* single free list. */
# define FL_COMPRESSION 8
	/* In between sizes map this many distinct sizes to a single */
	/* free list. */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
				 + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
	/* Number of free bytes on each list. */

/* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS */
/* > GC_max_large_allocd_bytes? */
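/* (GC_enough_large_bytes_left below answers this question; it is used in */
/* GC_allochblk_nth as the criterion for deciding whether a large free */
/* block may be split to satisfy a smaller request.) */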
static GC_bool GC_enough_large_bytes_left(bytes,n)
    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(blocks_needed)
    if (blocks_needed <= UNIQUE_THRESHOLD) return blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
				+ UNIQUE_THRESHOLD;
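/* For example, with the default values above (UNIQUE_THRESHOLD = 32, */
/* HUGE_THRESHOLD = 256, FL_COMPRESSION = 8): a 10 block request maps */
/* to index 10, a 100 block request maps to (100-32)/8 + 32 = 40, and */
/* any request of 256 blocks or more maps to N_HBLK_FLS, i.e. */
/* (256-32)/8 + 32 = 60. */
#if 0
  /* Illustrative sanity checks only; GC_fl_mapping_example is a */
  /* hypothetical helper, not part of the collector. */
  static void GC_fl_mapping_example(void)
  {
      GC_ASSERT(GC_hblk_fl_from_blocks(10) == 10);
      GC_ASSERT(GC_hblk_fl_from_blocks(100) == 40);
      GC_ASSERT(GC_hblk_fl_from_blocks(1000) == N_HBLK_FLS);
  }
#endif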
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else /* !USE_MUNMAP */
# define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      if (0 != h) GC_printf1("Free list %ld:\n",
      if (0 != h) GC_printf2("Free list %ld (Total size %ld):\n",
                             (unsigned long)GC_free_bytes[i]);
        GC_printf2("\t0x%lx size %lu ", (unsigned long)h, (unsigned long)sz);
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
            GC_printf0("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
            GC_printf0("partially black listed\n");
            GC_printf0("not black listed\n");
    if (total_free != GC_large_free_bytes) {
        GC_printf1("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                   (unsigned long) GC_large_free_bytes);
    GC_printf1("Total of %lu bytes on free list\n", (unsigned long)total_free);
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere. */
int free_list_index_of(wanted)
    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
        if (hhdr == wanted) return i;
void GC_dump_regions()
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        /* Merge in contiguous sections. */
        while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
        GC_printf2("***Section from 0x%lx to 0x%lx\n", start, end);
        for (p = start; p < end;) {
            GC_printf1("\t0x%lx ", (unsigned long)p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf1("Missing header!!(%ld)\n", hhdr);
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                GC_printf1("\tfree block of size 0x%lx bytes",
                           (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf0("(unmapped)\n");
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf1("\t\tBlock not on free list %ld!!\n",
                } else if (correct_index != actual_index) {
                    GC_printf2("\t\tBlock on list %ld, should be on %ld!!\n",
                               actual_index, correct_index);
                GC_printf1("\tused for blocks of size 0x%lx bytes\n",
                           (unsigned long)WORDS_TO_BYTES(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and */
/* kind of objects. */
/* Return FALSE on failure. */
static GC_bool setup_header(hhdr, sz, kind, flags)
word sz;        /* object size in words */
    /* Add description of valid object pointers */
      if (!GC_add_map_entry(sz)) return(FALSE);
      hhdr -> hb_map = GC_obj_map[sz > MAXOBJSZ? 0 : sz];

    /* Set size, kind and mark proc fields */
      hhdr -> hb_obj_kind = kind;
      hhdr -> hb_flags = flags;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += WORDS_TO_BYTES(sz);
      hhdr -> hb_descr = descr;

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
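/* (Callers that know only the header, such as GC_merge_unmapped and */
/* GC_freehblk below, pass FL_UNKNOWN and let the index be recomputed */
/* from hb_sz.) */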
void GC_remove_from_fl(hhdr, n)
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    /* We always need index to maintain free counts. */
    if (FL_UNKNOWN == n) {
        index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    if (hhdr -> hb_prev == 0) {
        if (FL_UNKNOWN == n) {
            index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(h)
    struct hblk * p = h - 1;

    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
    if (HBLK_IS_FREE(phdr)) {
        p = GC_prev_block(h - 1);
    if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
/*
 * Add hhdr to the appropriate free list.
 * We maintain individual free lists sorted by address.
 */
void GC_add_to_fl(h, hhdr)
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];

# ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);
      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr) || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr) || !IS_MAPPED(prevhdr));

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes)
    hhdr -> hb_next = second;
        GET_HDR(second, second_hdr);
        second_hdr -> hb_prev = h;
    GC_invalidate_map(hhdr);
/* Unmap blocks that haven't been recently touched. This is the only */
/* way blocks are ever unmapped. */
void GC_unmap_old(void)
    unsigned short last_rec, threshold;
# define UNMAP_THRESHOLD 6
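/* For example, if GC_gc_no is currently 100, threshold below becomes 94; */
/* a free block is then unmapped if its hb_last_reclaimed is below 94, or */
/* above 100 (i.e. left over from before the unsigned short counter */
/* wrapped). */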
    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if ((last_rec > GC_gc_no || last_rec < threshold)
            && threshold < GC_gc_no /* not recently wrapped */) {
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
/* Merge all unmapped blocks that are adjacent to other free */
/* blocks. This may involve remapping, since all blocks are either */
/* fully mapped or fully unmapped. */
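/* (When exactly one of two adjacent free blocks is mapped, the smaller */
/* of the two is converted, by remapping or unmapping it, to match the */
/* larger one, so the cheaper conversion is the one performed.) */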
void GC_merge_unmapped(void)
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
          next = (struct hblk *)((word)h + size);
          GET_HDR(next, nexthdr);
          /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
              if (size > nextsize) {
                GC_remap((ptr_t)next, nextsize);
                GC_unmap((ptr_t)h, size);
                hhdr -> hb_flags |= WAS_UNMAPPED;
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
            /* Unmap any gap in the middle */
            GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergeable with successor */ {
        } /* while (h != 0) ... */

#endif /* USE_MUNMAP */
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(h, hhdr, bytes, index)
    word total_size = hhdr -> hb_sz;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
        /* This may be very bad news ... */
        WARN("Header allocation failed: Dropping block.\n", 0);
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
# ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
    GC_add_to_fl(rest, rest_hdr);
/*
 * H is a free block. N points at an address inside it.
 * A new header for n has already been set up. Fix up h's header
 * to reflect the fact that it is being split, and move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly. But this path is hopefully
 * rare enough that it doesn't matter. The code is cleaner this way.)
 */
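/*
 * For example, if h heads a 5 block free region and n points 2 blocks
 * into it, then after the split h describes the 2 block prefix and is
 * returned to the free list for that smaller size, while n describes the
 * 3 block suffix, takes h's place on the original list, and is the piece
 * the caller goes on to allocate.
 */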
void GC_split_block(h, hhdr, n, nhdr, index)
int index;      /* Index of free list */
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
        HDR(prev) -> hb_next = n;
        GC_hblkfreelist[index] = n;
        HDR(next) -> hb_prev = n;
    INCR_FREE_BYTES(index, -(signed_word)h_size);
    FREE_ASSERT(GC_free_bytes[index] > 0);
# ifdef GC_ASSERTIONS
      nhdr -> hb_map = 0;   /* Don't fail test for consecutive */
                            /* free blocks in GC_add_to_fl. */
    hhdr -> hb_last_reclaimed = GC_gc_no;
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    GC_invalidate_map(nhdr);
struct hblk * GC_allochblk_nth();

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz words, searching the appropriate free lists.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * Unlike older versions of the collector, the client is responsible
 * for clearing the block, if necessary.
 */
GC_allochblk(sz, kind, flags)
unsigned flags;  /* IGNORE_OFF_PAGE or 0 */
    word blocks = OBJ_SZ_TO_BLOCKS(sz);
    int start_list = GC_hblk_fl_from_blocks(blocks);
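    /* For example (assuming 4 byte words and 4K heap blocks), a request */
    /* for sz = 3000 words needs OBJ_SZ_TO_BLOCKS(sz) = 3 blocks, so the */
    /* search below starts at free list 3 and moves on to larger size */
    /* classes only if that list cannot satisfy it. */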
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);

/*
 * The same, but with the search restricted to the nth free list.
 */
GC_allochblk_nth(sz, kind, flags, n)
unsigned char flags;  /* IGNORE_OFF_PAGE or 0 */
    register struct hblk *hbp;
    register hdr * hhdr;           /* Header corr. to hbp */
    register struct hblk *thishbp;
    register hdr * thishdr;        /* Header corr. to thishbp */
    signed_word size_needed;       /* number of bytes in requested objects */
    signed_word size_avail;        /* bytes available in this block */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for(; 0 != hbp; hbp = hhdr -> hb_next) {
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (size_avail != size_needed
                && !GC_use_entire_heap
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
                /* If we have enough large blocks left to cover any */
                /* previous request for large blocks, we go ahead */
                /* and split. Assuming a steady state, that should */
                /* be safe. It means that we can use the full */
                /* heap if we allocate only small objects. */
                if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                /* If we are deallocating lots of memory from */
                /* finalizers, fail and collect sooner rather */
                /* than later. */
                if (WORDS_TO_BYTES(GC_finalizer_mem_freed)
                    > (GC_heapsize >> 4)) {
# endif /* !USE_MUNMAP */

            /* If the next heap block is obviously better, go on. */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks. */
              signed_word next_size;

              thishbp = hhdr -> hb_next;
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {

            if ( !IS_UNCOLLECTABLE(kind) &&
                 (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                              HBLKSIZE : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                                      (word)eff_size_needed))
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
                    if (!IS_MAPPED(hhdr)) {
                      GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                      hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  /* Split the block at thishbp */
                  GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                  /* We must now allocate thishbp, since it may */
                  /* be on the wrong free list. */
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                  GC_large_alloc_warn_suppressed = 0;
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted. We need */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree */
                  /* blocks are unpopular. */
                  /* A dropped block will be reconsidered at next GC. */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to */
                    /* maximize the chance that we will recover some */
                    /* later. */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_words_wasted += BYTES_TO_WORDS(total_size);
                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                                  BYTES_TO_WORDS(HBLKSIZE),
                                  PTRFREE, 0);  /* Can't fail */
                          if (GC_debugging_started) {
                      /* Restore hbp to point at free block */
                      return GC_allochblk_nth(sz, kind, flags, n);
            if (size_avail >= size_needed) {
                if (!IS_MAPPED(hhdr)) {
                  GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                  hhdr -> hb_flags &= ~WAS_UNMAPPED;
                /* hbp may be on the wrong freelist; the parameter n */
                /* is important. */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
      if (!GC_install_counts(hbp, (word)size_needed)) return(0);
      /* This leaks memory under very rare conditions. */

    if (!setup_header(hhdr, sz, kind, flags)) {
        GC_remove_counts(hbp, (word)size_needed);
        return(0); /* ditto */

    /* Notify virtual dirty bit implementation that we are about to write. */
    /* Ensure that pointerfree objects are not protected if it's avoidable. */
    GC_remove_protection(hbp, divHBLKSZ(size_needed),
                         (hhdr -> hb_descr == 0) /* pointer-free */);

    /* We just successfully allocated a block. Restart count of */
    /* consecutive failures. */
      extern unsigned GC_fail_count;

    GC_large_free_bytes -= size_needed;

    GC_ASSERT(IS_MAPPED(hhdr));
struct hblk * GC_freehblk_ptr = 0;  /* Search position hint for GC_freehblk */

/*
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
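/*
 * For example, freeing a 2 block region whose successor is already a free
 * (and mapped) 3 block region removes the successor from its free list and
 * produces a single 5 block entry on free list 5.
 */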
struct hblk *next, *prev;
hdr *hhdr, *prevhdr, *nexthdr;

    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
      hhdr -> hb_last_reclaimed = GC_gc_no;

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf1("Duplicate large block deallocation of 0x%lx\n",
                   (unsigned long) hbp);
        ABORT("Duplicate large block deallocation");

    GC_ASSERT(IS_MAPPED(hhdr));
    GC_invalidate_map(hhdr);
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);

    /* Coalesce with predecessor, if possible. */
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
          prevhdr -> hb_last_reclaimed = GC_gc_no;
          GC_remove_header(hbp);
    /* FIXME: It is not clear we really always want to do these merges */
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents */
    /* unmapping. */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);