/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_pmark.h"

#ifndef GC_NO_FINALIZATION
/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                        */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
#define HASH3(addr,size,log_size) \
        ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
         % (size))
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
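
/* For example, with log_size = 2 the table has 1 << 2 == 4 buckets,   */
/* and HASH2(addr, 2) evaluates to                                     */
/* (((word)addr >> 3) ^ ((word)addr >> 5)) % 4: the first shift        */
/* discards the alignment bits, and xor-ing in a further-shifted copy  */
/* mixes higher address bits into the bucket index.                    */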
struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};
struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared. */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x, y) \
                (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
    word dl_hidden_obj;         /* Pointer to object base */
};
struct dl_hashtbl_s {
    struct disappearing_link **head;
    signed_word log_size;
    word entries;
};
STATIC struct dl_hashtbl_s GC_dl_hashtbl = {
    /* head */ NULL, /* log_size */ -1, /* entries */ 0 };
#ifndef GC_LONG_REFS_NOT_NEEDED
  STATIC struct dl_hashtbl_s GC_ll_hashtbl = { NULL, -1, 0 };
#endif
struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
    GC_finalization_proc fo_fn; /* Finalizer.                   */
    ptr_t fo_client_data;
    word fo_object_size;        /* In bytes.                    */
    finalization_mark_proc fo_mark_proc;        /* Mark-through procedure */
};
static signed_word log_fo_table_size = -1;

STATIC struct fnlz_roots_s {
  struct finalizable_object **fo_head;
  /* List of objects that should be finalized now: */
  struct finalizable_object *finalize_now;
} GC_fnlz_roots = { NULL, NULL };
GC_API void GC_CALL GC_push_finalizer_structures(void)
{
  GC_ASSERT((word)&GC_dl_hashtbl.head % sizeof(word) == 0);
  GC_ASSERT((word)&GC_fnlz_roots % sizeof(word) == 0);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_ASSERT((word)&GC_ll_hashtbl.head % sizeof(word) == 0);
    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
# endif
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
}
/* Double the size of a hash table. *log_size_ptr is the log of its    */
/* current size. May be a no-op.                                       */
/* *table is a pointer to an array of hash headers. If we succeed, we  */
/* update both *table and *log_size_ptr. Lock is held.                 */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr)
{
    register word i;
    register struct hash_chain_entry *p;
    signed_word log_old_size = *log_size_ptr;
    signed_word log_new_size = log_old_size + 1;
    word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    new_table = (struct hash_chain_entry **)
        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                (size_t)new_size * sizeof(struct hash_chain_entry *),
                NORMAL);
    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
{
    ptr_t base;

    base = (ptr_t)GC_base(link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}
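
/* Client-side usage sketch (illustrative only; struct cache is a      */
/* hypothetical client type, not part of this file):                   */
#if 0
  struct cache { void *victim; };

  void cache_init(struct cache *c)
  {
    c -> victim = GC_MALLOC(64);
    /* Ask the collector to clear c->victim once the object it points  */
    /* to becomes unreachable via strong references.                    */
    GC_general_register_disappearing_link(&c -> victim, c -> victim);
  }
#endif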
STATIC int GC_register_disappearing_link_inner(
                        struct dl_hashtbl_s *dl_hashtbl, void **link,
                        const void *obj, const char *tbl_log_name)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    LOCK();
    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    if (dl_hashtbl -> log_size == -1
        || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
        GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                      &dl_hashtbl -> log_size);
#       ifdef LINT2
          if (dl_hashtbl->log_size < 0) ABORT("log_size is negative");
#       endif
        GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                           1 << (unsigned)dl_hashtbl -> log_size);
    }
    index = HASH2(link, dl_hashtbl -> log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
            UNLOCK();
            return GC_DUPLICATE;
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
    if (EXPECT(NULL == new_dl, FALSE)) {
      GC_oom_func oom_fn = GC_oom_fn;
      UNLOCK();
      new_dl = (struct disappearing_link *)
                (*oom_fn)(sizeof(struct disappearing_link));
      if (0 == new_dl) {
        return GC_NO_MEMORY;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow. */
      index = HASH2(link, dl_hashtbl -> log_size);
      /* Check again that our disappearing link is not in the table. */
      for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
          curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            /* Free unused new_dl returned by GC_oom_fn(). */
            GC_free((void *)new_dl);
#         endif
          return GC_DUPLICATE;
        }
      }
    }
    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_hashtbl -> head[index]);
    dl_hashtbl -> head[index] = new_dl;
    dl_hashtbl -> entries++;
    UNLOCK();
    return GC_SUCCESS;
}
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
                                                         const void * obj)
{
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_general_register_disappearing_link");
    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,
                                               "dl");
}
#ifdef DBG_HDRS_ALL
# define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
#else
# define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
#endif
/* Unregisters given link and returns the link entry to free.  */
/* Assumes the lock is held.                                   */
GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl, void **link)
{
    struct disappearing_link *curr_dl;
    struct disappearing_link *prev_dl = NULL;
    size_t index;

    if (dl_hashtbl->log_size == -1)
        return NULL; /* prevent integer shift by a negative amount */

    index = HASH2(link, dl_hashtbl->log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Remove found entry from the table. */
            if (NULL == prev_dl) {
                dl_hashtbl -> head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            dl_hashtbl -> entries--;
            break;
        }
        prev_dl = curr_dl;
    }
    return curr_dl;
}
GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
{
    struct disappearing_link *curr_dl;
    DCL_LOCK_STATE;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    LOCK();
    curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
    UNLOCK();
    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
    return 1;
}
/* Toggle-ref support. */
#ifndef GC_TOGGLE_REFS_NOT_NEEDED
  typedef union {
    /* Lowest bit is used to distinguish between choices. */
    void *strong_ref;
    GC_hidden_pointer weak_ref;
  } GCToggleRef;

  STATIC GC_toggleref_func GC_toggleref_callback = 0;
  STATIC GCToggleRef *GC_toggleref_arr = NULL;
  STATIC int GC_toggleref_array_size = 0;
  STATIC int GC_toggleref_array_capacity = 0;
  GC_INNER void GC_process_togglerefs(void)
  {
    int i;
    int new_size = 0;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }
    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
  }
  STATIC void GC_normal_finalize_mark_proc(ptr_t);

  static void push_and_mark_object(void *p)
  {
    GC_normal_finalize_mark_proc(p);
    while (!GC_mark_stack_empty()) {
      MARK_FROM_MARK_STACK();
    }
    GC_set_mark_bit(p);
    if (GC_mark_state != MS_NONE) {
      while (!GC_mark_some(0)) {
        /* Empty. */
      }
    }
  }
  STATIC void GC_mark_togglerefs(void)
  {
    int i;

    if (NULL == GC_toggleref_arr)
      return;

    /* TODO: Hide GC_toggleref_arr to avoid its marking from roots. */
    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      if (obj != NULL && ((word)obj & 1) == 0) {
        push_and_mark_object(obj);
      }
    }
  }
  STATIC void GC_clear_togglerefs(void)
  {
    int i;

    for (i = 0; i < GC_toggleref_array_size; ++i) {
      if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
        if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
          GC_toggleref_arr[i].weak_ref = 0;
        } else {
          /* No need to copy, BDWGC is a non-moving collector. */
        }
      }
    }
  }
  GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
  {
    DCL_LOCK_STATE;

    LOCK();
    GC_toggleref_callback = fn;
    UNLOCK();
  }
  GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
  {
    GC_toggleref_func fn;
    DCL_LOCK_STATE;

    LOCK();
    fn = GC_toggleref_callback;
    UNLOCK();
    return fn;
  }
  static GC_bool ensure_toggleref_capacity(int capacity_inc)
  {
    GC_ASSERT(capacity_inc >= 0);
    if (NULL == GC_toggleref_arr) {
      GC_toggleref_array_capacity = 32; /* initial capacity */
      GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == GC_toggleref_arr)
        return FALSE;
    }
    if ((unsigned)GC_toggleref_array_size + (unsigned)capacity_inc
        >= (unsigned)GC_toggleref_array_capacity) {
      GCToggleRef *new_array;
      while ((unsigned)GC_toggleref_array_capacity
              < (unsigned)GC_toggleref_array_size + (unsigned)capacity_inc) {
        GC_toggleref_array_capacity *= 2;
        if (GC_toggleref_array_capacity < 0) /* overflow */
          return FALSE;
      }

      new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
                        NORMAL);
      if (NULL == new_array)
        return FALSE;
      BCOPY(GC_toggleref_arr, new_array,
            GC_toggleref_array_size * sizeof(GCToggleRef));
      GC_INTERNAL_FREE(GC_toggleref_arr);
      GC_toggleref_arr = new_array;
    }
    return TRUE;
  }
  GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
  {
    int res = GC_SUCCESS;
    DCL_LOCK_STATE;

    GC_ASSERT(obj != NULL);
    LOCK();
    if (GC_toggleref_callback != 0) {
      if (!ensure_toggleref_capacity(1)) {
        res = GC_NO_MEMORY;
      } else {
        GC_toggleref_arr[GC_toggleref_array_size++].strong_ref =
                        is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
      }
    }
    UNLOCK();
    return res;
  }
#endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
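
/* Usage sketch for the toggle-ref machinery (illustrative; the        */
/* handle-counting policy and has_external_handle are hypothetical):   */
#if 0
  static int my_toggle_callback(void *obj)
  {
    /* Keep a strong reference while external code holds a handle. */
    return has_external_handle(obj) ? GC_TOGGLE_REF_STRONG
                                    : GC_TOGGLE_REF_WEAK;
  }

  void setup_togglerefs(void *obj)
  {
    GC_set_toggleref_func(my_toggle_callback);
    GC_toggleref_add(obj, 1 /* is_strong_ref */);
  }
#endif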
/* Finalizer callback support. */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;

GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
{
  LOCK();
  GC_object_finalized_proc = fn;
  UNLOCK();
}
GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
{
  GC_await_finalize_proc fn;

  LOCK();
  fn = GC_object_finalized_proc;
  UNLOCK();
  return fn;
}
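
/* Usage sketch (illustrative; unregister_external is hypothetical).   */
/* The callback runs while the collector is enqueueing the object for  */
/* finalization, with the allocation lock held, so it should be brief  */
/* and must not allocate:                                              */
#if 0
  static void on_finalized(void *obj)
  {
    unregister_external(obj);
  }

  void setup_await_finalize(void)
  {
    GC_set_await_finalize_proc(on_finalized);
  }
#endif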
#ifndef GC_LONG_REFS_NOT_NEEDED
  GC_API int GC_CALL GC_register_long_link(void * * link, const void * obj)
  {
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
      ABORT("Bad arg to GC_register_long_link");
    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
                                               "long dl");
  }
  GC_API int GC_CALL GC_unregister_long_link(void * * link)
  {
    struct disappearing_link *curr_dl;
    DCL_LOCK_STATE;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    LOCK();
    curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
    UNLOCK();
    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
    return 1;
  }
#endif /* !GC_LONG_REFS_NOT_NEEDED */
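
/* Usage sketch (illustrative): unlike a short disappearing link, a    */
/* long link is cleared only when the object is about to be reclaimed, */
/* so it still reads non-NULL while the object is merely awaiting      */
/* finalization:                                                       */
#if 0
  static void *weak_handle;

  void track_weakly(void)
  {
    weak_handle = GC_MALLOC(32);
    GC_register_long_link(&weak_handle, weak_handle);
  }
#endif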
#ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
  /* Moves a link. Assumes the lock is held. */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
  {
    struct disappearing_link *curr_dl, *prev_dl, *new_dl;
    size_t curr_index, new_index;
    word curr_hidden_link;
    word new_hidden_link;

    if (dl_hashtbl->log_size == -1)
      return GC_NOT_FOUND; /* prevent integer shift by a negative amount */

    /* Find current link. */
    curr_index = HASH2(link, dl_hashtbl -> log_size);
    curr_hidden_link = GC_HIDE_POINTER(link);
    prev_dl = NULL;
    for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == curr_hidden_link)
        break;
      prev_dl = curr_dl;
    }
    if (NULL == curr_dl) {
      return GC_NOT_FOUND;
    }
    if (link == new_link) {
      return GC_SUCCESS; /* Nothing to do. */
    }

    /* link found; now check new_link not present. */
    new_index = HASH2(new_link, dl_hashtbl -> log_size);
    new_hidden_link = GC_HIDE_POINTER(new_link);
    for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
         new_dl = dl_next(new_dl)) {
      if (new_dl -> dl_hidden_link == new_hidden_link) {
        /* Target already registered; bail. */
        return GC_DUPLICATE;
      }
    }

    /* Remove from old, add to new, update link. */
    if (NULL == prev_dl) {
      dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
    } else {
      dl_set_next(prev_dl, dl_next(curr_dl));
    }
    curr_dl -> dl_hidden_link = new_hidden_link;
    dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
    dl_hashtbl -> head[new_index] = curr_dl;
    return GC_SUCCESS;
  }
  GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
  {
    int result;
    DCL_LOCK_STATE;

    if (((word)new_link & (ALIGNMENT-1)) != 0
        || !NONNULL_ARG_NOT_NULL(new_link))
      ABORT("Bad new_link arg to GC_move_disappearing_link");
    if (((word)link & (ALIGNMENT-1)) != 0)
      return GC_NOT_FOUND; /* Nothing to do. */

    LOCK();
    result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
    UNLOCK();
    return result;
  }
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
    {
      int result;
      DCL_LOCK_STATE;

      if (((word)new_link & (ALIGNMENT-1)) != 0
          || !NONNULL_ARG_NOT_NULL(new_link))
        ABORT("Bad new_link arg to GC_move_long_link");
      if (((word)link & (ALIGNMENT-1)) != 0)
        return GC_NOT_FOUND; /* Nothing to do. */

      LOCK();
      result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
      UNLOCK();
      return result;
    }
# endif /* !GC_LONG_REFS_NOT_NEEDED */
#endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
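
/* Usage sketch (illustrative; old_tbl/new_tbl are hypothetical client */
/* arrays): when the client moves the word holding a weak pointer, the */
/* registration must be moved along with it:                           */
#if 0
  void move_weak_slot(void **old_tbl, void **new_tbl, size_t i)
  {
    new_tbl[i] = old_tbl[i];
    switch (GC_move_disappearing_link(&old_tbl[i], &new_tbl[i])) {
    case GC_SUCCESS:
    case GC_NOT_FOUND:  /* nothing was registered at &old_tbl[i] */
      break;
    case GC_DUPLICATE:  /* &new_tbl[i] is already registered     */
      break;
    }
  }
#endif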
/* Possible finalization_marker procedures. Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.           */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ(p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));
}
/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                              */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q;
    word r;
    ptr_t scan_limit;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       scan_limit = p + descr - sizeof(word);
    } else {
       scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
        r = *(word *)q;
        if (r < (word)p || r > (word)target_limit) {
            GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
        }
    }
}
STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}
/* GC_unreachable_finalize_mark_proc is an alias for normal marking,   */
/* but it is explicitly tested for, and triggers different             */
/* behavior. Objects registered in this way are not finalized          */
/* if they are reachable by other finalizable objects, even if those   */
/* other objects specify no ordering.                                  */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
{
    GC_normal_finalize_mark_proc(p);
}
/* Register a finalization function. See gc.h for details.     */
/* The last parameter is a procedure that determines           */
/* marking for finalization ordering. Any objects marked       */
/* by that procedure will be guaranteed to not have been       */
/* finalized when this finalizer is invoked.                   */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    struct finalizable_object * curr_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    LOCK();
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &log_fo_table_size);
#       ifdef LINT2
          if (log_fo_table_size < 0) ABORT("log_size is negative");
#       endif
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1 << (unsigned)log_fo_table_size);
    }
    /* In the THREADS case we hold the allocation lock. */
    for (;;) {
      struct finalizable_object *prev_fo = NULL;
      GC_oom_func oom_fn;

      index = HASH2(obj, log_fo_table_size);
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe. The client may see only *ocd       */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for obj. */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal. But a high    */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it. We deleted it first to maintain     */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
            }
          }
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(). */
        GC_ASSERT(fn != 0);
#       ifdef LINT2
          if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");
#       endif
        break;
      }
      if (fn == 0) {
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(obj, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
                GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory. *ocd and *ofn remain unchanged. */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    UNLOCK();
}
GC_API void GC_CALL GC_register_finalizer(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
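
/* Usage sketch (illustrative; my_finalizer and release_handle are     */
/* hypothetical client names):                                         */
#if 0
  static void my_finalizer(void *obj, void *client_data)
  {
    release_handle(client_data); /* runs after obj became unreachable */
  }

  void attach_finalizer(void *obj, void *handle)
  {
    GC_finalization_proc old_fn;
    void *old_cd;

    GC_register_finalizer(obj, my_finalizer, handle, &old_fn, &old_cd);
    /* Passing fn == 0 would unregister:        */
    /* GC_register_finalizer(obj, 0, 0, 0, 0);  */
  }
#endif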
GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}

GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}
static GC_bool need_unreachable_finalization = FALSE;
        /* Avoid the work if this isn't used. */

GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    need_unreachable_finalization = TRUE;
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
}
#ifndef NO_DEBUGGING
  STATIC void GC_dump_finalization_links(
                                const struct dl_hashtbl_s *dl_hashtbl)
  {
    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
                                (size_t)1 << dl_hashtbl->log_size;
    size_t i;

    for (i = 0; i < dl_size; i++) {
      struct disappearing_link *curr_dl;

      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        ptr_t real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        ptr_t real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);

        GC_printf("Object: %p, link: %p\n",
                  (void *)real_ptr, (void *)real_link);
      }
    }
  }
  GC_API void GC_CALL GC_dump_finalization(void)
  {
    struct finalizable_object * curr_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;
    size_t i;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
#   endif
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        ptr_t real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);

        GC_printf("Finalizable object: %p\n", (void *)real_ptr);
      }
    }
  }
#endif /* !NO_DEBUGGING */
#ifndef SMALL_CONFIG
  STATIC word GC_old_dl_entries = 0; /* for stats printing */
# ifndef GC_LONG_REFS_NOT_NEEDED
    STATIC word GC_old_ll_entries = 0;
# endif
#endif /* !SMALL_CONFIG */
/* Global variables to minimize the level of recursion when a client  */
/* finalizer allocates memory.                                        */
STATIC int GC_finalizer_nested = 0;
                        /* Only the lowest byte is used, the rest is  */
                        /* padding for proper global data alignment   */
                        /* required for some compilers (like Watcom). */
STATIC unsigned GC_finalizer_skipped = 0;
/* Checks and updates the level of finalizers recursion.               */
/* Returns NULL if GC_invoke_finalizers() should not be called by the  */
/* collector (to minimize the risk of a deep finalizers recursion),    */
/* otherwise returns a pointer to GC_finalizer_nested.                 */
STATIC unsigned char *GC_check_finalizer_nested(void)
{
  unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;

  if (nesting_level) {
    /* We are inside another GC_invoke_finalizers().        */
    /* Skip some implicitly-called GC_invoke_finalizers()   */
    /* depending on the nesting (recursion) level.          */
    if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
    GC_finalizer_skipped = 0;
  }
  *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
  return (unsigned char *)&GC_finalizer_nested;
}
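
/* For example, at nesting level 1 every second implicitly-called     */
/* GC_invoke_finalizers() proceeds, at level 2 every fourth, and so   */
/* on: a nested call is skipped until GC_finalizer_skipped reaches    */
/* 1U << nesting_level.                                               */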
#define ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr_dl, prev_dl) \
  { \
    size_t i; \
    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
                                (size_t)1 << dl_hashtbl->log_size; \
    for (i = 0; i < dl_size; i++) { \
      struct disappearing_link *prev_dl = NULL; \
      curr_dl = dl_hashtbl -> head[i]; \
      while (curr_dl) {

#define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
        prev_dl = curr_dl; \
        curr_dl = dl_next(curr_dl); \
      } \
    } \
  }

#define DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr_dl, prev_dl, next_dl) \
  { \
    next_dl = dl_next(curr_dl); \
    if (NULL == prev_dl) { \
        dl_hashtbl -> head[i] = next_dl; \
    } else { \
        dl_set_next(prev_dl, next_dl); \
    } \
    GC_clear_mark_bit(curr_dl); \
    dl_hashtbl -> entries--; \
    curr_dl = next_dl; \
    continue; \
  }
GC_INLINE void GC_make_disappearing_links_disappear(
                                struct dl_hashtbl_s* dl_hashtbl)
{
    struct disappearing_link *curr, *next;

    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
        ptr_t real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
        ptr_t real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);

        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            GC_clear_mark_bit(curr);
            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
        }
    ITERATE_DL_HASHTBL_END(curr, prev)
}
GC_INLINE void GC_remove_dangling_disappearing_links(
                                struct dl_hashtbl_s* dl_hashtbl)
{
    struct disappearing_link *curr, *next;

    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
        ptr_t real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));

        if (NULL != real_link && !GC_is_marked(real_link)) {
            GC_clear_mark_bit(curr);
            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
        }
    ITERATE_DL_HASHTBL_END(curr, prev)
}
/* Called with held lock (but the world is running).                   */
/* Cause disappearing links to disappear and unreachable objects to be */
/* enqueued for finalization.                                          */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;
#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);
  /* Mark all objects reachable via chains of 1 or more pointers       */
  /* from finalizable objects.                                         */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still               */
  /* unreachable.                                                      */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table. */
            next_fo = fo_next(curr_fo);
            if (NULL == prev_fo) {
              GC_fnlz_roots.fo_head[i] = next_fo;
            } else {
              fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;
            if (GC_object_finalized_proc)
              GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization. */
            fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
            GC_fnlz_roots.finalize_now = curr_fo;
            /* Unhide object pointer so any future collections will    */
            /* see it.                                                 */
            curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }
    if (GC_java_finalization) {
      /* Make sure we mark everything reachable from objects finalized */
      /* using the no_order mark_proc.                                 */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

      /* Now revive finalize-when-unreachable objects reachable from   */
      /* other finalizable objects.                                    */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
#       if defined(GC_ASSERTIONS) || defined(LINT2)
          if (curr_fo != NULL && log_fo_table_size < 0)
            ABORT("log_size is negative");
#       endif
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
    }
    GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_clear_togglerefs();
#   endif
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
      GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
#   endif

    if (GC_fail_count) {
      /* Don't prevent running finalizers if there has been an allocation */
      /* failure recently.                                                */
#     ifdef THREADS
        GC_reset_finalizer_nested();
#     else
        GC_finalizer_nested = 0;
#     endif
    }
}
#ifndef JAVA_FINALIZATION_NOT_NEEDED

  /* Enqueue all remaining finalizers to be run.  Assumes lock is held. */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * curr_fo, * next_fo;
    ptr_t real_ptr;
    int i;
    int fo_size;

    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization. */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_fnlz_roots.finalize_now = curr_fo;

          /* Unhide object pointer so any future collections will      */
          /* see it.                                                   */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
      }
    }
    GC_fo_entries = 0; /* all entries deleted from the hash table */
  }
  /* Invoke all remaining finalizers that haven't yet been run.
   * This is needed for strict compliance with the Java standard,
   * which can make the runtime guarantee that all finalizers are run.
   * Unfortunately, the Java standard implies we have to keep running
   * finalizers until there are no more left, a potential infinite loop.
   * Note that this is even more dangerous than the usual Java
   * finalizers, in that objects reachable from static variables
   * may have been finalized when these finalizers are run.
   * Finalizers run at this point must be prepared to deal with a
   * mostly broken world.
   * This routine is externally callable, so is called without
   * the allocation lock.
   */
  GC_API void GC_CALL GC_finalize_all(void)
  {
    DCL_LOCK_STATE;

    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good */
      /* idea when we should be notifying another thread to run them. */
      /* But otherwise we don't have a great way to wait for them to  */
      /* run.                                                         */
      LOCK();
    }
    UNLOCK();
  }

#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
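
/* Usage sketch (illustrative): a Java-style runtime may call this on  */
/* orderly shutdown, accepting that the finalizers then run against a  */
/* mostly torn-down world:                                             */
#if 0
  void shutdown_runtime(void)
  {
    GC_gcollect();      /* enqueue whatever is already unreachable */
    GC_finalize_all();  /* then force the remaining finalizers     */
  }
#endif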
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of "safe state" and     */
/* getting into that safe state is expensive.)                          */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
{
    return GC_fnlz_roots.finalize_now != NULL;
}
/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                           */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    while (GC_fnlz_roots.finalize_now != NULL) {
        struct finalizable_object * curr_fo;

#       ifdef THREADS
            LOCK();
#       endif
        if (count == 0) {
            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        }
        curr_fo = GC_fnlz_roots.finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_fnlz_roots.finalize_now = fo_next(curr_fo);
            UNLOCK();
            if (curr_fo == 0) break;
#       else
            GC_fnlz_roots.finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
        /* Explicit freeing of curr_fo is probably a bad idea. */
        /* It throws off accounting if nearly all objects are  */
        /* finalizable. Otherwise it should not matter.        */
    }
    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0 && bytes_freed_before != GC_bytes_freed) {
        LOCK();
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
        UNLOCK();
    }
    return count;
}
static word last_finalizer_notification = 0;

GC_INNER void GC_notify_or_invoke_finalizers(void)
{
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */
#   endif
    DCL_LOCK_STATE;

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue. */
      if (NULL == GC_fnlz_roots.finalize_now) return;
#   endif
    LOCK();

    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          long i;
          /* Stops when GC_gc_no wraps; that's OK. */
          last_back_trace_gc_no = (word)(-1);  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it. It probably shouldn't.                    */
              UNLOCK();
              GC_generate_random_backtrace_no_gc();
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            UNLOCK();
            GC_print_back_graph_stats();
            LOCK();
          }
#       endif
      }
#   endif
    if (NULL == GC_fnlz_roots.finalize_now) {
      UNLOCK();
      return;
    }

    if (!GC_finalize_on_demand) {
      unsigned char *pnested = GC_check_finalizer_nested();
      UNLOCK();
      /* Skip GC_invoke_finalizers() if nested */
      if (pnested != NULL) {
        (void) GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers. */
#       ifndef THREADS
          GC_ASSERT(NULL == GC_fnlz_roots.finalize_now);
#       endif   /* Otherwise GC can run concurrently and add more */
      }
      return;
    }

    /* These variables require synchronization to avoid data races. */
    if (last_finalizer_notification != GC_gc_no) {
      last_finalizer_notification = GC_gc_no;
      notifier_fn = GC_finalizer_notifier;
    }
    UNLOCK();
    if (notifier_fn != 0)
        (*notifier_fn)(); /* Invoke the notifier */
}
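
/* Usage sketch (illustrative; the wakeup/wait plumbing is the         */
/* client's own and only hinted at here): with finalize-on-demand set, */
/* the collector merely notifies, and a dedicated thread runs the      */
/* finalizers:                                                         */
#if 0
  static void notify_finalizer_thread(void)
  {
    wake_finalizer_thread(); /* hypothetical, e.g. signal a condvar */
  }

  void setup_notifier(void)
  {
    GC_set_finalize_on_demand(1);
    GC_set_finalizer_notifier(notify_finalizer_thread);
  }

  void finalizer_thread_main(void)
  {
    for (;;) {
      wait_for_wakeup(); /* hypothetical */
      if (GC_should_invoke_finalizers())
        (void)GC_invoke_finalizers();
    }
  }
#endif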
#ifndef SMALL_CONFIG
# ifndef GC_LONG_REFS_NOT_NEEDED
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
# else
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)
# endif

  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));
    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
      ++ready;
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
  }
#endif /* !SMALL_CONFIG */
#endif /* !GC_NO_FINALIZATION */