/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include <stdio.h>
#include <string.h>     /* for strlen(), strcpy() */
#include <errno.h>      /* for ENOMEM */
#include "private/gc_priv.h"

extern void * GC_clear_stack(void *);   /* in misc.c, behaves like identity */
void GC_extend_size_map(size_t);        /* in misc.c */
/* Allocate a reclaim list for the kind.        */
/* Return TRUE on success.                      */
GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}
/* Allocate a large block of size lb bytes.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
/* We hold the allocation lock.                 */
/* EXTRA_BYTES were already added to lb.        */
ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;

    /* Round up to a multiple of a granule. */
    lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
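    /* Illustrative example (the granule size is configuration-        */
    /* dependent): assuming GRANULE_BYTES is 16, a request of lb = 20  */
    /* rounds up to (20 + 15) & ~15 == 32 bytes, which                 */
    /* OBJ_SZ_TO_BLOCKS then converts to a whole number of heap        */
    /* blocks.                                                         */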
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work. */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
      if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
      }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lb, k, flags);
    }
    if (0 == h) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = h -> hb_body;
    }
    return result;
}
/* Allocate a large block of size lb bytes.  Clear it if appropriate.  */
/* We hold the allocation lock.                                        */
/* EXTRA_BYTES were already added to lb.                               */
ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result = GC_alloc_large(lb, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of a GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}
/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First a version that assumes we already      */
/* hold the lock:                               */
void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        if ((op = *opp) == 0) {
            if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized) GC_init_inner();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                return(GC_generic_malloc_inner(lb, k));
            }
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lg, k);
            if (op == 0) goto out;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }
out:
    return op;
}
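/* Usage sketch (illustrative, not from the original source): object   */
/* kinds index GC_obj_kinds, so a caller that already holds the        */
/* allocation lock might do:                                           */
/*     void *p = GC_generic_malloc_inner(24, NORMAL);    scanned       */
/*     void *q = GC_generic_malloc_inner(24, PTRFREE);   not scanned   */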
/* Allocate a composite object of size lb bytes.  The caller           */
/* guarantees that pointers past the first page are not relevant.      */
/* The caller holds the allocation lock.                               */
void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
    word lb_adjusted;
    void * op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner(lb, k));
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    GC_bytes_allocd += lb_adjusted;
    return op;
}
void * GC_generic_malloc(size_t lb, int k)
{
    void * result;
    DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
        LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
        UNLOCK();
    } else {
        size_t lw, lb_rounded;
        word n_blocks;
        GC_bool init;

        lw = ROUNDED_UP_WORDS(lb);
        lb_rounded = WORDS_TO_BYTES(lw);
        n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
        init = GC_obj_kinds[k].ok_init;
        LOCK();
        result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
        if (0 != result) {
          if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
              ((word *)result)[0] = 0;
              ((word *)result)[1] = 0;
              ((word *)result)[lw-1] = 0;
              ((word *)result)[lw-2] = 0;
#           endif
          }
        }
        GC_bytes_allocd += lb_rounded;
        UNLOCK();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) return((*GC_oom_fn)(lb));
    return(result);
}
#define GENERAL_MALLOC(lb,k) \
    GC_clear_stack(GC_generic_malloc(lb, k))
/* We make the GC_clear_stack call a tail call, hoping to get more of  */
/* the stack.                                                          */
/* Allocate lb bytes of atomic (pointer-free) data. */
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc_atomic(size_t lb)
#else
  void * GC_malloc_atomic(size_t lb)
#endif
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_aobjfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, 0)) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        *opp = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    }
    return(GENERAL_MALLOC((word)lb, PTRFREE));
}
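/* Usage sketch (illustrative): atomic objects are never scanned for   */
/* pointers, so they suit strings, bitmaps, and other pointer-free     */
/* data, e.g.:                                                         */
/*     char *buf = (char *)GC_malloc_atomic(1024);                     */
/* Any pointer stored in such an object is invisible to the marker.    */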
/* Provide a version of strdup() that uses the collector to allocate   */
/* the copy of the string.                                             */
char *GC_strdup(const char *s)
{
  char *copy;

  if (s == NULL) return NULL;
  if ((copy = GC_malloc_atomic(strlen(s) + 1)) == NULL) {
    errno = ENOMEM;
    return NULL;
  }
  strcpy(copy, s);
  return copy;
}
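/* Usage sketch (illustrative): the copy is collectable, so callers    */
/* need not free it explicitly, e.g.:                                  */
/*     char *name = GC_strdup("example");                              */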
/* Allocate lb bytes of composite (pointerful) data. */
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_malloc(size_t lb)
#else
  void * GC_malloc(size_t lb)
#endif
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = (void **)&(GC_objfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, 0)) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* Sanity-check the free-list link before unlinking the object. */
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                        <= (word)GC_greatest_plausible_heap_addr
                      && (word)obj_link(op)
                        >= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    }
    return(GENERAL_MALLOC(lb, NORMAL));
}
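/* Usage sketch (illustrative): NORMAL objects are cleared on          */
/* allocation and scanned conservatively for pointers, e.g.:           */
/*     struct node { struct node *next; int v; };                      */
/*     struct node *n = (struct node *)GC_malloc(sizeof *n);           */
/* Here n->next is traced, so the list stays reachable through n.      */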
# ifdef REDIRECT_MALLOC

/* Avoid unnecessary nested procedure calls here, by #defining some    */
/* malloc replacements.  Otherwise we end up saving a meaningless      */
/* return address in the object.  It also speeds things up, but it     */
/* is admittedly quite ugly.                                           */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_malloc_replacement(lb) \
        GC_debug_malloc(lb, RA "unknown", 0)
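/* Build note (typical usage, an assumption rather than part of this   */
/* file): REDIRECT_MALLOC is normally given on the compiler command    */
/* line, e.g. -DREDIRECT_MALLOC=GC_malloc, so that direct malloc()     */
/* calls throughout the program are routed into the collector.         */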
void * malloc(size_t lb)
{
    /* It might help to manually inline the GC_malloc call here.       */
    /* But any decent compiler should reduce the extra procedure call  */
    /* to at most a jump instruction in this case.                     */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /*
       * Thread initialization can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
    return((void *)REDIRECT_MALLOC(lb));
}
#if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
  static ptr_t GC_libpthread_start = 0;
  static ptr_t GC_libpthread_end = 0;
  static ptr_t GC_libld_start = 0;
  static ptr_t GC_libld_end = 0;
  extern GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp);
        /* From os_dep.c */

  void GC_init_lib_bounds(void)
  {
    if (GC_libpthread_start != 0) return;
    if (!GC_text_mapping("libpthread-",
                         &GC_libpthread_start, &GC_libpthread_end)) {
        WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
        /* This might still work with some versions of libpthread,     */
        /* so we don't abort.  Perhaps we should.                      */
        /* Generate the message only once: */
        GC_libpthread_start = (ptr_t)1;
    }
    if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
        WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
    }
  }
#endif /* GC_LINUX_THREADS */
void * calloc(size_t n, size_t lb)
{
#   if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
      /* libpthread allocated some memory that is only pointed to by   */
      /* mmapped thread stacks.  Make sure it's not collectable.       */
      {
        static GC_bool lib_bounds_set = FALSE;
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since  */
        /* the bounds will be set when/if we create another thread.    */
        if (!lib_bounds_set) {
          GC_init_lib_bounds();
          lib_bounds_set = TRUE;
        }
        if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
            || (caller >= GC_libld_start && caller < GC_libld_end))
          return GC_malloc_uncollectable(n*lb);
        /* The two ranges are actually usually adjacent, so there may  */
        /* be a way to speed this up.                                  */
      }
#   endif
    return((void *)REDIRECT_MALLOC(n*lb));
}
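/* Note: the product n*lb can overflow size_t for huge arguments; a    */
/* hardened replacement might first check                              */
/*     n != 0 && lb > ((size_t)-1) / n                                 */
/* and fail such requests rather than allocate a truncated block.      */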
#ifndef strdup
  char *strdup(const char *s)
  {
    size_t len = strlen(s) + 1;   /* includes the NUL terminator */
    char * result = ((char *)REDIRECT_MALLOC(len));
    if (result == 0) {
      errno = ENOMEM;
      return 0;
    }
    BCOPY(s, result, len);
    return result;
  }
#endif /* !defined(strdup) */
/* If strdup is macro defined, we assume that it actually calls malloc, */
/* and thus the right thing will happen even without overriding it.     */
/* This seems to be true on most Linux systems.                         */
#undef GC_debug_malloc_replacement

# endif /* REDIRECT_MALLOC */
/* Explicitly deallocate an object p. */
void GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* in bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ... */
#   ifdef LOG_ALLOCS
      GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
       (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS))
      /* For Solaris, we have to redirect malloc calls during          */
      /* initialization.  For the others, this seems to happen         */
      /* implicitly.  Don't try to deallocate that memory.             */
      if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT((ngranules <= MAXOBJGRANULES), 1)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the object is   */
        /* reallocated, it doesn't matter.  Otherwise the collector    */
        /* will do it, since the object is on a free list.             */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
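/* Usage sketch (illustrative): explicit deallocation is never required */
/* with the collector, but it recycles memory promptly when an object's */
/* lifetime is known, e.g.:                                              */
/*     void *p = GC_malloc(64);                                          */
/*     ...                                                               */
/*     GC_free(p);    p must be the base address of the object           */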
/* Explicitly deallocate an object p when we already hold the lock.    */
/* Only used for internally allocated objects, so we can take some     */
/* shortcuts.                                                          */
#ifdef THREADS
void GC_free_inner(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz;          /* in bytes */
    size_t ngranules;   /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) BZERO((word *)p + 1, sz-sizeof(word));
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);

        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        GC_freehblk(h);
    }
}
#endif /* THREADS */
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
#   define REDIRECT_FREE GC_free
# endif

# ifdef REDIRECT_FREE
  void free(void * p)
  {
#   if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
      {
        /* Don't bother with initialization checks.  If nothing        */
        /* has been initialized, the check fails, and that's safe,     */
        /* since we haven't allocated uncollectable objects either.    */
        ptr_t caller = (ptr_t)__builtin_return_address(0);
        /* This test does not need to ensure memory visibility, since  */
        /* the bounds will be set when/if we create another thread.    */
        if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
            || (caller >= GC_libld_start && caller < GC_libld_end)) {
          GC_free(p);
          return;
        }
      }
#   endif
    REDIRECT_FREE(p);
  }
# endif /* REDIRECT_FREE */