2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
16 #include "private/gc_priv.h"
20 * 1. allocation of heap block headers
21 * 2. A map from addresses to heap block addresses to heap block headers
23 * Access speed is crucial. We implement an index structure based on a 2
/* Head and tail of the ascending (by address) doubly-linked list of   */
/* bottom_index structures.  Both require the allocator lock.          */
27 STATIC bottom_index * GC_all_bottom_indices = 0;
28 /* Pointer to the first (lowest address) */
29 /* bottom_index. Assumes the lock is held. */
31 STATIC bottom_index * GC_all_bottom_indices_end = 0;
32 /* Pointer to the last (highest address) */
33 /* bottom_index. Assumes the lock is held. */
35 /* Non-macro version of header location routine */
/* NOTE(review): the function body is elided in this chunk (source    */
/* lines 37-46 missing); presumably it just evaluates the HDR()       */
/* lookup for h -- confirm against the full file.                     */
36 GC_INNER hdr * GC_find_header(ptr_t h)
47 /* Handle a header cache miss. Returns a pointer to the */
48 /* header corresponding to p, if p can possibly be a valid */
49 /* object pointer, and 0 otherwise. */
50 /* GUARANTEED to return 0 for a pointer past the first page */
51 /* of an object unless both GC_all_interior_pointers is set */
52 /* and p is in fact a valid object pointer. */
53 /* Never returns a pointer to a free hblk. */
/* NOTE(review): many source lines are elided in this chunk; the      */
/* control flow shown below is incomplete (missing #else/#endif,      */
/* declarations, returns, and closing braces).                        */
55 #ifdef PRINT_BLACK_LIST
56 GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce, ptr_t source)
58 GC_header_cache_miss(ptr_t p, hdr_cache_entry *hce)
64 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
65 if (GC_all_interior_pointers) {
/* Forwarding addresses encode the distance (in blocks) back  */
/* toward the start of a large object; walk backwards until a */
/* real header (or nil) is reached.                           */
69 current = (ptr_t)HBLKPTR(current);
71 current = current - HBLKSIZE*(word)hhdr;
73 } while(IS_FORWARDING_ADDR_OR_NIL(hhdr));
74 /* current points to near the start of the large object */
75 if (hhdr -> hb_flags & IGNORE_OFF_PAGE)
77 if (HBLK_IS_FREE(hhdr)
78 || p - current >= (ptrdiff_t)(hhdr->hb_sz)) {
79 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
80 /* Pointer past the end of the block */
84 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
85 /* And return zero: */
87 GC_ASSERT(hhdr == 0 || !HBLK_IS_FREE(hhdr));
89 /* Pointers past the first page are probably too rare */
90 /* to add them to the cache. We don't. */
91 /* And correctness relies on the fact that we don't. */
94 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
99 if (HBLK_IS_FREE(hhdr)) {
100 GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
/* Found a valid header: record the (block address -> header) */
/* mapping in the caller's cache entry for future lookups.    */
103 hce -> block_addr = (word)(p) >> LOG_HBLKSIZE;
104 hce -> hce_hdr = hhdr;
110 /* Routines to dynamically allocate collector data structures that will */
111 /* never be freed. */
/* Next free byte within the current scratch area; valid only while   */
/* the allocator lock is held.                                        */
113 static ptr_t scratch_free_ptr = 0;
115 /* GC_scratch_last_end_ptr is end point of last obtained scratch area. */
116 /* GC_scratch_end_ptr is end point of current scratch area. */
/* Allocate bytes of permanent (never-freed) collector storage.       */
/* First tries to carve the request out of the current scratch        */
/* buffer; large requests, or requests that overflow the buffer, go   */
/* directly to GET_MEM.  Caller must hold the allocator lock.         */
/* NOTE(review): several source lines are elided in this chunk        */
/* (declarations, returns, closing braces); read with the full file.  */
118 GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
120 ptr_t result = scratch_free_ptr;
123 bytes = ROUNDUP_GRANULE_SIZE(bytes);
125 scratch_free_ptr += bytes;
126 if ((word)scratch_free_ptr <= (word)GC_scratch_end_ptr) {
127 /* Unallocated space of scratch buffer has enough size. */
/* Large request: bypass the scratch buffer entirely.         */
131 if (bytes >= MINHINCR * HBLKSIZE) {
132 bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
133 result = (ptr_t)GET_MEM(bytes_to_get);
134 GC_add_to_our_memory(result, bytes_to_get);
135 /* Undo scratch free area pointer update; get memory directly. */
136 scratch_free_ptr -= bytes;
137 if (result != NULL) {
138 /* Update end point of last obtained area (needed only */
139 /* by GC_register_dynamic_libraries for some targets). */
140 GC_scratch_last_end_ptr = result + bytes;
/* Scratch buffer exhausted: obtain a new one of at least     */
/* MINHINCR blocks.                                           */
145 bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(MINHINCR * HBLKSIZE);
146 /* round up for safety */
147 result = (ptr_t)GET_MEM(bytes_to_get);
148 GC_add_to_our_memory(result, bytes_to_get);
149 if (NULL == result) {
150 WARN("Out of memory - trying to allocate requested amount"
151 " (%" WARN_PRIdPTR " bytes)...\n", (word)bytes);
152 scratch_free_ptr -= bytes; /* Undo free area pointer update */
153 bytes_to_get = ROUNDUP_PAGESIZE_IF_MMAP(bytes);
154 result = (ptr_t)GET_MEM(bytes_to_get);
155 GC_add_to_our_memory(result, bytes_to_get);
158 /* Update scratch area pointers and retry. */
159 scratch_free_ptr = result;
160 GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
161 GC_scratch_last_end_ptr = GC_scratch_end_ptr;
/* Singly-linked free list of recycled hdr structures, threaded       */
/* through their hb_next fields.                                      */
165 static hdr * hdr_free_list = 0;
167 /* Return an uninitialized header */
/* Pops from hdr_free_list when possible, otherwise allocates fresh   */
/* scratch storage.  May return NULL on out-of-memory (see            */
/* GC_scratch_alloc).  Caller must hold the allocator lock.           */
168 static hdr * alloc_hdr(void)
172 if (NULL == hdr_free_list) {
173 result = (hdr *)GC_scratch_alloc(sizeof(hdr));
175 result = hdr_free_list;
176 hdr_free_list = (hdr *) (result -> hb_next);
/* Return hhdr to the header free list for reuse; the hb_next field   */
/* is reused as the free-list link.  Caller must hold the lock.       */
181 GC_INLINE void free_hdr(hdr * hhdr)
183 hhdr -> hb_next = (struct hblk *) hdr_free_list;
184 hdr_free_list = hhdr;
187 #ifdef COUNT_HDR_CACHE_HITS
188 /* Used for debugging/profiling (the symbols are externally visible). */
189 word GC_hdr_cache_hits = 0;
190 word GC_hdr_cache_misses = 0;
/* One-time initialization of the header index: allocates the shared  */
/* all-nils bottom_index sentinel and points every top-level index    */
/* slot at it.  Exits on allocation failure.                          */
193 GC_INNER void GC_init_headers(void)
197 GC_all_nils = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
198 if (GC_all_nils == NULL) {
199 GC_err_printf("Insufficient memory for GC_all_nils\n");
202 BZERO(GC_all_nils, sizeof(bottom_index));
203 for (i = 0; i < TOP_SZ; i++) {
204 GC_top_index[i] = GC_all_nils;
208 /* Make sure that there is a bottom level index block for address addr. */
209 /* Return FALSE on failure. */
/* NOTE(review): interior source lines are elided here (hash-chain    */
/* insertion and the non-HASH_TL_HASH variant are not fully visible). */
210 static GC_bool get_index(word addr)
212 word hi = (word)(addr) >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
215 bottom_index ** prev;
216 bottom_index *pi; /* old_p */
219 GC_ASSERT(I_HOLD_LOCK());
/* Search the collision chain for an existing entry with this key.   */
223 pi = p = GC_top_index[i];
224 while(p != GC_all_nils) {
225 if (p -> key == hi) return(TRUE);
229 if (GC_top_index[hi] != GC_all_nils)
/* Not found: allocate and zero a new bottom_index.                  */
233 r = (bottom_index *)GC_scratch_alloc(sizeof(bottom_index));
234 if (EXPECT(NULL == r, FALSE))
236 BZERO(r, sizeof(bottom_index));
242 /* Add it to the list of bottom indices */
243 prev = &GC_all_bottom_indices; /* pointer to p */
244 pi = 0; /* bottom_index preceding p */
245 while ((p = *prev) != 0 && p -> key < hi) {
247 prev = &(p -> asc_link);
/* If inserted at the end, the new node becomes the list tail.       */
251 GC_all_bottom_indices_end = r;
262 /* Install a header for block h. */
263 /* The header is uninitialized. */
264 /* Returns the header or 0 on failure. */
265 GC_INNER struct hblkhdr * GC_install_header(struct hblk *h)
/* Ensure the bottom_index for h exists before allocating the hdr.   */
269 if (!get_index((word) h)) return(0);
270 result = alloc_hdr();
/* Record the GC cycle number; the truncation to unsigned short is   */
/* deliberate (it is only used as an approximate age).               */
274 result -> hb_last_reclaimed = (unsigned short)GC_gc_no;
280 /* Set up forwarding counts for block h of size sz */
/* Each non-first block of a large object gets a small-integer fake   */
/* header (a "forwarding count") giving the distance back toward h.   */
/* Returns FALSE if a needed bottom_index cannot be allocated.        */
281 GC_INNER GC_bool GC_install_counts(struct hblk *h, size_t sz/* bytes */)
/* First make sure every bottom_index spanned by [h, h+sz) exists.   */
285 for (hbp = h; (word)hbp < (word)h + sz; hbp += BOTTOM_SZ) {
286 if (!get_index((word)hbp))
288 if ((word)hbp > GC_WORD_MAX - (word)BOTTOM_SZ * HBLKSIZE)
289 break; /* overflow of hbp+=BOTTOM_SZ is expected */
291 if (!get_index((word)h + sz - 1))
293 for (hbp = h + 1; (word)hbp < (word)h + sz; hbp += 1) {
294 word i = HBLK_PTR_DIFF(hbp, h);
/* Distances larger than MAX_JUMP are clamped; lookups iterate.      */
296 SET_HDR(hbp, (hdr *)(i > MAX_JUMP? MAX_JUMP : i));
301 /* Remove the header for block h */
/* NOTE(review): body elided in this chunk; presumably frees the hdr  */
/* via free_hdr() and clears the index entry -- confirm in full file. */
302 GC_INNER void GC_remove_header(struct hblk *h)
310 /* Remove forwarding counts for h */
/* Clears the fake forwarding headers installed by GC_install_counts  */
/* for the non-first blocks of a sz-byte object starting at h.        */
311 GC_INNER void GC_remove_counts(struct hblk *h, size_t sz/* bytes */)
315 for (hbp = h+1; (word)hbp < (word)h + sz; hbp += 1) {
320 /* Apply fn to all allocated blocks. It is the caller responsibility */
321 /* to avoid data race during the function execution (e.g. by holding */
322 /* the allocation lock). */
/* NOTE(review): interior lines are elided; the client_data argument  */
/* and parts of the loop body are not visible in this chunk.          */
323 void GC_apply_to_all_blocks(void (*fn)(struct hblk *h, word client_data),
327 bottom_index * index_p;
/* Walk every bottom_index in ascending address order.               */
329 for (index_p = GC_all_bottom_indices; index_p != 0;
330 index_p = index_p -> asc_link) {
/* Scan each index block from the top down so that forwarding        */
/* counts can be used to skip over large-object interiors.           */
331 for (j = BOTTOM_SZ-1; j >= 0;) {
332 if (!IS_FORWARDING_ADDR_OR_NIL(index_p->index[j])) {
333 if (!HBLK_IS_FREE(index_p->index[j])) {
/* Reconstruct the block address from the index key and slot.        */
334 (*fn)(((struct hblk *)
335 (((index_p->key << LOG_BOTTOM_SZ) + (word)j)
340 } else if (index_p->index[j] == 0) {
/* Forwarding count: jump back over the large object's blocks.       */
343 j -= (signed_word)(index_p->index[j]);
349 /* Get the next valid block whose address is at least h */
350 /* Return 0 if there is none. */
/* NOTE(review): interior lines are elided (outer loop, forwarding-   */
/* address handling, and the final return are not fully visible).     */
351 GC_INNER struct hblk * GC_next_used_block(struct hblk *h)
353 REGISTER bottom_index * bi;
354 REGISTER word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
356 GC_ASSERT(I_HOLD_LOCK());
/* No bottom_index covers h: advance to the first one whose key is   */
/* >= h's key and restart the scan there.                            */
358 if (bi == GC_all_nils) {
359 REGISTER word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
361 bi = GC_all_bottom_indices;
362 while (bi != 0 && bi -> key < hi) bi = bi -> asc_link;
366 while (j < BOTTOM_SZ) {
367 hdr * hhdr = bi -> index[j];
368 if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
371 if (!HBLK_IS_FREE(hhdr)) {
/* Found an allocated block: rebuild its address from key and slot.  */
372 return((struct hblk *)
373 (((bi -> key << LOG_BOTTOM_SZ) + j)
/* Free block: skip over its full extent in one step.                */
376 j += divHBLKSZ(hhdr -> hb_sz);
386 /* Get the last (highest address) block whose address is */
387 /* at most h. Return 0 if there is none. */
388 /* Unlike the above, this may return a free block. */
/* NOTE(review): interior lines are elided (outer loop, j reset, and  */
/* the final return 0 are not visible in this chunk).                 */
389 GC_INNER struct hblk * GC_prev_block(struct hblk *h)
392 signed_word j = ((word)h >> LOG_HBLKSIZE) & (BOTTOM_SZ-1);
394 GC_ASSERT(I_HOLD_LOCK());
/* No bottom_index covers h: back up to the last one whose key is    */
/* <= h's key, searching from the list tail.                         */
396 if (bi == GC_all_nils) {
397 word hi = (word)h >> (LOG_BOTTOM_SZ + LOG_HBLKSIZE);
398 bi = GC_all_bottom_indices_end;
399 while (bi != 0 && bi -> key > hi) bi = bi -> desc_link;
404 hdr * hhdr = bi -> index[j];
407 } else if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
/* Forwarding count: step back over the large object's interior.     */
408 j -= (signed_word)hhdr;
410 return((struct hblk *)
411 (((bi -> key << LOG_BOTTOM_SZ) + j)
/* Exhausted this index block: continue with the previous one.       */
416 bi = bi -> desc_link;