/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_pmark.h"

#ifndef GC_NO_FINALIZATION
/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                         */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
#define HASH3(addr,size,log_size) \
        ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
         & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
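
/* For illustration (an arbitrary example address, not from the sources):  */
/* with log_size == 4 (a 16-bucket table) and an 8-byte-aligned address    */
/* such as 0x7f1230, HASH2 expands to                                      */
/*   ((0x7f1230 >> 3) ^ (0x7f1230 >> 7)) & 15                              */
/* which is 2, i.e. bucket 2.  Dropping the low 3 bits first discards the  */
/* (mostly constant) alignment bits, and folding in higher bits spreads    */
/* addresses from the same region over the buckets.                        */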
struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};
struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared. */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x, y) \
                (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
    word dl_hidden_obj;         /* Pointer to object base. */
};

struct dl_hashtbl_s {
    struct disappearing_link **head;
    signed_word log_size;
    word entries;
};
STATIC struct dl_hashtbl_s GC_dl_hashtbl = {
    /* head */ NULL, /* log_size */ -1, /* entries */ 0 };
#ifndef GC_LONG_REFS_NOT_NEEDED
  STATIC struct dl_hashtbl_s GC_ll_hashtbl = { NULL, -1, 0 };
#endif
struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
    GC_finalization_proc fo_fn;                 /* Finalizer.   */
    ptr_t fo_client_data;
    word fo_object_size;                        /* In bytes.    */
    finalization_mark_proc fo_mark_proc;        /* Mark-through procedure */
};
static signed_word log_fo_table_size = -1;

STATIC struct fnlz_roots_s {
  struct finalizable_object **fo_head;
  /* List of objects that should be finalized now: */
  struct finalizable_object *finalize_now;
} GC_fnlz_roots = { NULL, NULL };
GC_API void GC_CALL GC_push_finalizer_structures(void)
{
  GC_ASSERT((word)&GC_dl_hashtbl.head % sizeof(word) == 0);
  GC_ASSERT((word)&GC_fnlz_roots % sizeof(word) == 0);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_ASSERT((word)&GC_ll_hashtbl.head % sizeof(word) == 0);
    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
# endif
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
}
/* Double the size of a hash table. *log_size_ptr is the log of its    */
/* current size. May be a no-op.                                       */
/* *table is a pointer to an array of hash headers. If we succeed, we  */
/* update both *table and *log_size_ptr. Lock is held.                 */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr)
    register struct hash_chain_entry *p;
    signed_word log_old_size = *log_size_ptr;
    signed_word log_new_size = log_old_size + 1;
    word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    new_table = (struct hash_chain_entry **)
        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                (size_t)new_size * sizeof(struct hash_chain_entry *),
    if (new_table == 0) {
            ABORT("Insufficient space for initial table allocation");

    for (i = 0; i < old_size; i++) {
        ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
    *log_size_ptr = log_new_size;

GC_API int GC_CALL GC_register_disappearing_link(void * * link)
    base = (ptr_t)GC_base(link);
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));

STATIC int GC_register_disappearing_link_inner(
                        struct dl_hashtbl_s *dl_hashtbl, void **link,
                        const void *obj, const char *tbl_log_name)
    struct disappearing_link *curr_dl;
    struct disappearing_link * new_dl;

    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    if (dl_hashtbl -> log_size == -1
        || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
        GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                      &dl_hashtbl -> log_size);
        GC_ASSERT(dl_hashtbl->log_size >= 0);
        GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                           1 << (unsigned)dl_hashtbl -> log_size);
    index = HASH2(link, dl_hashtbl -> log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
        GC_oom_func oom_fn = GC_oom_fn;
        new_dl = (struct disappearing_link *)
                        (*oom_fn)(sizeof(struct disappearing_link));
        /* It's not likely we'll make it here, but ... */
        /* Recalculate index since the table may have grown.   */
        index = HASH2(link, dl_hashtbl -> log_size);
        /* Check again that our disappearing link is not in the table. */
        for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
             curr_dl = dl_next(curr_dl)) {
            if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
                curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
#               ifndef DBG_HDRS_ALL
                    /* Free unused new_dl returned by GC_oom_fn() */
                    GC_free((void *)new_dl);
    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_hashtbl -> head[index]);
    dl_hashtbl -> head[index] = new_dl;
    dl_hashtbl -> entries++;
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_general_register_disappearing_link");
    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,

#ifdef DBG_HDRS_ALL
# define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
#else
# define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
#endif
/* Unregisters the given link and returns the link entry to free.      */
/* Assume the lock is held.                                            */
GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl, void **link)
    struct disappearing_link *curr_dl;
    struct disappearing_link *prev_dl = NULL;

    if (dl_hashtbl->log_size == -1)
        return NULL; /* prevent integer shift by a negative amount */

    index = HASH2(link, dl_hashtbl->log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Remove found entry from the table. */
            if (NULL == prev_dl) {
                dl_hashtbl -> head[index] = dl_next(curr_dl);
                dl_set_next(prev_dl, dl_next(curr_dl));
            dl_hashtbl -> entries--;

GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
    struct disappearing_link *curr_dl;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
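
/* A minimal client-side sketch of the disappearing-link API defined       */
/* above (illustrative only; the struct and variable names are invented):  */
/*                                                                         */
/*   struct cache_cell { void *value; };                                   */
/*                                                                         */
/*   struct cache_cell *cell = GC_NEW(struct cache_cell);                  */
/*   void *obj = GC_MALLOC(100);                                           */
/*   cell -> value = obj;                                                  */
/*   GC_general_register_disappearing_link(&cell -> value, obj);           */
/*                                                                         */
/* Once obj becomes unreachable (the registered link itself does not keep  */
/* it alive), the collector clears cell -> value to NULL.  The             */
/* registration can also be dropped explicitly with                        */
/*                                                                         */
/*   GC_unregister_disappearing_link(&cell -> value);                      */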
/* Toggle-ref support. */
#ifndef GC_TOGGLE_REFS_NOT_NEEDED
  typedef union {
    /* Lowest bit is used to distinguish between choices. */
    void *strong_ref;
    GC_hidden_pointer weak_ref;
  } GCToggleRef;

  STATIC GC_toggleref_func GC_toggleref_callback = 0;
  STATIC GCToggleRef *GC_toggleref_arr = NULL;
  STATIC int GC_toggleref_array_size = 0;
  STATIC int GC_toggleref_array_capacity = 0;
  GC_INNER void GC_process_togglerefs(void)
    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);

      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        ABORT("Bad toggle-ref status returned by callback");

    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;

  STATIC void GC_normal_finalize_mark_proc(ptr_t);

  static void push_and_mark_object(void *p)
    GC_normal_finalize_mark_proc(p);
    while (!GC_mark_stack_empty()) {
      MARK_FROM_MARK_STACK();
    if (GC_mark_state != MS_NONE) {
      while (!GC_mark_some(0)) {

  STATIC void GC_mark_togglerefs(void)
    if (NULL == GC_toggleref_arr)

    /* TODO: Hide GC_toggleref_arr to avoid its marking from roots. */
    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      if (obj != NULL && ((word)obj & 1) == 0) {
        push_and_mark_object(obj);

  STATIC void GC_clear_togglerefs(void)
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
        if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
          GC_toggleref_arr[i].weak_ref = 0;
          /* No need to copy, BDWGC is a non-moving collector. */
  GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
    GC_toggleref_callback = fn;

  GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
    GC_toggleref_func fn;

    fn = GC_toggleref_callback;

  static GC_bool ensure_toggleref_capacity(int capacity_inc)
    GC_ASSERT(capacity_inc >= 0);
    if (NULL == GC_toggleref_arr) {
      GC_toggleref_array_capacity = 32; /* initial capacity */
      GC_toggleref_arr = GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
      if (NULL == GC_toggleref_arr)

    if ((unsigned)GC_toggleref_array_size + (unsigned)capacity_inc
        >= (unsigned)GC_toggleref_array_capacity) {
      GCToggleRef *new_array;
      while ((unsigned)GC_toggleref_array_capacity
             < (unsigned)GC_toggleref_array_size + (unsigned)capacity_inc) {
        GC_toggleref_array_capacity *= 2;
        if (GC_toggleref_array_capacity < 0) /* overflow */

      new_array = GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
      if (NULL == new_array)
      BCOPY(GC_toggleref_arr, new_array,
            GC_toggleref_array_size * sizeof(GCToggleRef));
      GC_INTERNAL_FREE(GC_toggleref_arr);
      GC_toggleref_arr = new_array;

  GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
    int res = GC_SUCCESS;

    GC_ASSERT(obj != NULL);
    if (GC_toggleref_callback != 0) {
      if (!ensure_toggleref_capacity(1)) {
      GC_toggleref_arr[GC_toggleref_array_size++].strong_ref =
                        is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);

#endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
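
/* An illustrative sketch of how a client runtime might drive the          */
/* toggle-ref support above (my_toggle_callback, struct wrapper and the    */
/* externally_owned flag are invented names):                              */
/*                                                                         */
/*   static int my_toggle_callback(void *obj)                              */
/*   {                                                                     */
/*     return ((struct wrapper *)obj) -> externally_owned                  */
/*               ? GC_TOGGLE_REF_STRONG : GC_TOGGLE_REF_WEAK;              */
/*   }                                                                     */
/*                                                                         */
/*   GC_set_toggleref_func(my_toggle_callback);                            */
/*   GC_toggleref_add(wrapper_obj, 1);   (initially a strong reference)    */
/*                                                                         */
/* During collections GC_process_togglerefs() invokes the callback for     */
/* each registered object and, based on the result, keeps it strongly      */
/* referenced, downgrades it to a hidden weak reference (cleared by        */
/* GC_clear_togglerefs() once the object dies), or drops the entry         */
/* (GC_TOGGLE_REF_DROP).                                                   */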
/* Finalizer callback support. */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;

GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
    GC_object_finalized_proc = fn;

GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
    GC_await_finalize_proc fn;

    fn = GC_object_finalized_proc;

#ifndef GC_LONG_REFS_NOT_NEEDED
  GC_API int GC_CALL GC_register_long_link(void * * link, const void * obj)
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_register_long_link");
    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,

  GC_API int GC_CALL GC_unregister_long_link(void * * link)
    struct disappearing_link *curr_dl;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
#endif /* !GC_LONG_REFS_NOT_NEEDED */

#ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
  /* Moves a link. Assume the lock is held. */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
    struct disappearing_link *curr_dl, *prev_dl, *new_dl;
    size_t curr_index, new_index;
    word curr_hidden_link;
    word new_hidden_link;

    if (dl_hashtbl->log_size == -1)
        return GC_NOT_FOUND; /* prevent integer shift by a negative amount */

    /* Find current link. */
    curr_index = HASH2(link, dl_hashtbl -> log_size);
    curr_hidden_link = GC_HIDE_POINTER(link);
    for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == curr_hidden_link)

    if (NULL == curr_dl) {

    if (link == new_link) {
        return GC_SUCCESS; /* Nothing to do. */
    /* Link found; now check that new_link is not already in the table. */
    new_index = HASH2(new_link, dl_hashtbl -> log_size);
    new_hidden_link = GC_HIDE_POINTER(new_link);
    for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
         new_dl = dl_next(new_dl)) {
        if (new_dl -> dl_hidden_link == new_hidden_link) {
            /* Target already registered; bail. */

    /* Remove from old, add to new, update link. */
    if (NULL == prev_dl) {
        dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
        dl_set_next(prev_dl, dl_next(curr_dl));
    curr_dl -> dl_hidden_link = new_hidden_link;
    dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
    dl_hashtbl -> head[new_index] = curr_dl;

  GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
    if (((word)new_link & (ALIGNMENT-1)) != 0
        || !NONNULL_ARG_NOT_NULL(new_link))
      ABORT("Bad new_link arg to GC_move_disappearing_link");
    if (((word)link & (ALIGNMENT-1)) != 0)
      return GC_NOT_FOUND; /* Nothing to do. */

    result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
      if (((word)new_link & (ALIGNMENT-1)) != 0
          || !NONNULL_ARG_NOT_NULL(new_link))
        ABORT("Bad new_link arg to GC_move_long_link");
      if (((word)link & (ALIGNMENT-1)) != 0)
        return GC_NOT_FOUND; /* Nothing to do. */

      result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
# endif /* !GC_LONG_REFS_NOT_NEEDED */
#endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
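
/* Sketch of when a client moves a link (illustrative; the holder struct   */
/* and variable names are invented).  If the client relocates the memory   */
/* that contains a registered link, e.g. by copying a structure, the       */
/* registration has to follow the field to its new address:                */
/*                                                                         */
/*   struct holder { void *weak_field; };                                  */
/*   struct holder *dst = GC_NEW(struct holder);                           */
/*   dst -> weak_field = src -> weak_field;                                */
/*   GC_move_disappearing_link(&src -> weak_field, &dst -> weak_field);    */
/*                                                                         */
/* The call returns GC_SUCCESS on success, GC_NOT_FOUND if the old link    */
/* was never registered, and GC_DUPLICATE if the new location already has  */
/* a registered link.                                                      */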
/* Possible finalization_marker procedures. Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.           */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
    PUSH_OBJ(p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));

/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                              */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
    word descr = hhdr -> hb_descr;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
        scan_limit = p + descr - sizeof(word);
        scan_limit = target_limit + 1 - sizeof(word);
    for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
        if (r < (word)p || r > (word)target_limit) {
            GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);

STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}

/* Possible finalization_marker procedures. Note that mark stack       */
/* overflow is handled by the caller, and is not a disaster.           */

/* GC_unreachable_finalize_mark_proc is an alias for normal marking,   */
/* but it is explicitly tested for, and triggers different             */
/* behavior. Objects registered in this way are not finalized          */
/* if they are reachable by other finalizable objects, even if those   */
/* other objects specify no ordering.                                  */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
    GC_normal_finalize_mark_proc(p);
/* Register a finalization function. See gc.h for details.             */
/* The last parameter is a procedure that determines                   */
/* marking for finalization ordering. Any objects marked               */
/* by that procedure will be guaranteed not to have been               */
/* finalized when this finalizer is invoked.                           */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
    struct finalizable_object * curr_fo;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */

    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
        GC_ASSERT(log_fo_table_size >= 0);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1 << (unsigned)log_fo_table_size);
    /* In the THREADS case we hold the allocation lock. */
      struct finalizable_object *prev_fo = NULL;

      index = HASH2(obj, log_fo_table_size);
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe. The client may see only *ocd       */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for obj. */
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
            fo_set_next(prev_fo, fo_next(curr_fo));
            /* May not happen if we get a signal. But a high    */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it. We deleted it first to maintain     */
            /* consistency in the event of a signal.            */
              GC_fnlz_roots.fo_head[index] = curr_fo;
              fo_set_next(prev_fo, curr_fo);

#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);

        curr_fo = fo_next(curr_fo);

      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(). */
        if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");

      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */

      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        new_fo = (struct finalizable_object *)
                        (*oom_fn)(sizeof(struct finalizable_object));
          /* Not enough memory. *ocd and *ofn remain unchanged. */

      /* It's not likely we'll make it here, but ... */

      /* Recalculate index since the table may have grown and   */
      /* check again that our finalizer is not in the table.    */
      GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fnlz_roots.fo_head[index] = new_fo;

GC_API void GC_CALL GC_register_finalizer(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);

GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);

GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);

static GC_bool need_unreachable_finalization = FALSE;
        /* Avoid the work if this isn't used. */

GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
    need_unreachable_finalization = TRUE;
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
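
/* A minimal registration sketch for the functions above (illustrative;    */
/* my_finalizer is an invented name):                                      */
/*                                                                         */
/*   static void my_finalizer(void *obj, void *client_data)                */
/*   {                                                                     */
/*     ... release non-GC resources associated with obj ...                */
/*   }                                                                     */
/*                                                                         */
/*   void *obj = GC_MALLOC(100);                                           */
/*   GC_register_finalizer(obj, my_finalizer, NULL, NULL, NULL);           */
/*                                                                         */
/* GC_register_finalizer gives topologically ordered finalization: obj is  */
/* not finalized while it is still reachable from other finalizable        */
/* objects.  The _no_order, _ignore_self and _unreachable variants above   */
/* differ only in the mark procedure used to establish that ordering.      */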
  STATIC void GC_dump_finalization_links(
                                const struct dl_hashtbl_s *dl_hashtbl)
    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
                                (size_t)1 << dl_hashtbl->log_size;

    for (i = 0; i < dl_size; i++) {
      struct disappearing_link *curr_dl;

      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        ptr_t real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        ptr_t real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);

        GC_printf("Object: %p, link: %p\n",
                  (void *)real_ptr, (void *)real_link);

  GC_API void GC_CALL GC_dump_finalization(void)
    struct finalizable_object * curr_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);

    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        ptr_t real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);

        GC_printf("Finalizable object: %p\n", (void *)real_ptr);
#endif /* !NO_DEBUGGING */

  STATIC word GC_old_dl_entries = 0; /* for stats printing */
# ifndef GC_LONG_REFS_NOT_NEEDED
    STATIC word GC_old_ll_entries = 0;
#endif /* !SMALL_CONFIG */

/* Global variables to minimize the level of recursion when a client   */
/* finalizer allocates memory.                                          */
STATIC int GC_finalizer_nested = 0;
                        /* Only the lowest byte is used, the rest is    */
                        /* padding for proper global data alignment     */
                        /* required for some compilers (like Watcom).   */
STATIC unsigned GC_finalizer_skipped = 0;
/* Checks and updates the level of finalizers recursion.               */
/* Returns NULL if GC_invoke_finalizers() should not be called by the  */
/* collector (to minimize the risk of a deep finalizers recursion),    */
/* otherwise returns a pointer to GC_finalizer_nested.                 */
STATIC unsigned char *GC_check_finalizer_nested(void)
  unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;

    /* We are inside another GC_invoke_finalizers().            */
    /* Skip some implicitly-called GC_invoke_finalizers()       */
    /* depending on the nesting (recursion) level.              */
    if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
    GC_finalizer_skipped = 0;

  *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
  return (unsigned char *)&GC_finalizer_nested;
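
/* Illustrative arithmetic for the throttling above: at nesting level n an */
/* implicitly triggered GC_invoke_finalizers() call is allowed through     */
/* only once per (1U << n) attempts.  E.g. at level 3 the first seven      */
/* nested attempts return NULL and are skipped; the eighth resets          */
/* GC_finalizer_skipped, bumps the level to 4 and lets finalizers run.     */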
#define ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr_dl, prev_dl) \
    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
                                (size_t)1 << dl_hashtbl->log_size; \
    for (i = 0; i < dl_size; i++) { \
      struct disappearing_link *prev_dl = NULL; \
      curr_dl = dl_hashtbl -> head[i]; \

#define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
        curr_dl = dl_next(curr_dl); \

#define DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr_dl, prev_dl, next_dl) \
    next_dl = dl_next(curr_dl); \
    if (NULL == prev_dl) { \
        dl_hashtbl -> head[i] = next_dl; \
        dl_set_next(prev_dl, next_dl); \
    GC_clear_mark_bit(curr_dl); \
    dl_hashtbl -> entries--; \

GC_INLINE void GC_make_disappearing_links_disappear(
                                struct dl_hashtbl_s* dl_hashtbl)
    struct disappearing_link *curr, *next;

    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
        ptr_t real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
        ptr_t real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);

        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            GC_clear_mark_bit(curr);
            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
    ITERATE_DL_HASHTBL_END(curr, prev)

GC_INLINE void GC_remove_dangling_disappearing_links(
                                struct dl_hashtbl_s* dl_hashtbl)
    struct disappearing_link *curr, *next;

    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
        ptr_t real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));

        if (NULL != real_link && !GC_is_marked(real_link)) {
            GC_clear_mark_bit(curr);
            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
    ITERATE_DL_HASHTBL_END(curr, prev)
/* Called with the lock held (but the world is running).               */
/* Cause disappearing links to disappear and unreachable objects to be */
/* enqueued for finalization.                                          */
GC_INNER void GC_finalize(void)
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();

    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);

    /* Mark all objects reachable via chains of 1 or more pointers     */
    /* from finalizable objects.                                       */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
    /* Enqueue for finalization all objects that are still             */
    /* unreachable.                                                    */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];

      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            /* Delete from hash table. */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
                fo_set_next(prev_fo, next_fo);

              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.   */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_fnlz_roots.finalize_now = curr_fo;
            /* Unhide the object pointer so any future collections     */
            /* will see it.                                            */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));

            curr_fo = fo_next(curr_fo);
  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        GC_ASSERT(NULL == curr_fo || log_fo_table_size >= 0);
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);

              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
                fo_set_next(prev_fo, next_fo);
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fnlz_roots.fo_head[i] = curr_fo;

  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                 */
      GC_reset_finalizer_nested();
      GC_finalizer_nested = 0;
#ifndef JAVA_FINALIZATION_NOT_NEEDED

  /* Enqueue all remaining finalizers to be run. Assumes the lock is held. */
  STATIC void GC_enqueue_all_finalizers(void)
    struct finalizable_object * curr_fo, * next_fo;

    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.     */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_fnlz_roots.finalize_now = curr_fo;

          /* Unhide the object pointer so any future collections       */
          /* will see it.                                              */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);

    GC_fo_entries = 0; /* all entries deleted from the hash table */
/* Invoke all remaining finalizers that haven't yet been run.
 * This is needed for strict compliance with the Java standard,
 * which can make the runtime guarantee that all finalizers are run.
 * Unfortunately, the Java standard implies we have to keep running
 * finalizers until there are no more left, a potential infinite loop.
 *
 * Note that this is even more dangerous than the usual Java
 * finalizers, in that objects reachable from static variables
 * may have been finalized when these finalizers are run.
 * Finalizers run at this point must be prepared to deal with a
 * mostly broken world.
 * This routine is externally callable, so is called without
 * the allocation lock.
 */
GC_API void GC_CALL GC_finalize_all(void)
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();

      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good */
      /* idea when we should be notifying another thread to run them. */
      /* But otherwise we don't have a great way to wait for them to  */
      /* run.                                                         */

#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of "safe state" and     */
/* getting into that safe state is expensive.)                          */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
    return GC_fnlz_roots.finalize_now != NULL;

/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                           */
GC_API int GC_CALL GC_invoke_finalizers(void)
    word bytes_freed_before = 0; /* initialized to prevent warning. */

    while (GC_fnlz_roots.finalize_now != NULL) {
        struct finalizable_object * curr_fo;

            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */

        curr_fo = GC_fnlz_roots.finalize_now;

            if (curr_fo != 0) GC_fnlz_roots.finalize_now = fo_next(curr_fo);

            if (curr_fo == 0) break;

            GC_fnlz_roots.finalize_now = fo_next(curr_fo);

        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;

        /* Explicit freeing of curr_fo is probably a bad idea. */
        /* It throws off accounting if nearly all objects are  */
        /* finalizable. Otherwise it should not matter.        */

    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0 && bytes_freed_before != GC_bytes_freed) {
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);

static word last_finalizer_notification = 0;

GC_INNER void GC_notify_or_invoke_finalizers(void)
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1; /* Skip first one. */

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue. */
      if (NULL == GC_fnlz_roots.finalize_now) return;

    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          /* Stops when GC_gc_no wraps; that's OK. */
          last_back_trace_gc_no = (word)(-1); /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it. It probably shouldn't.                    */
              GC_generate_random_backtrace_no_gc();
          last_back_trace_gc_no = GC_gc_no;
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            GC_print_back_graph_stats();

    if (NULL == GC_fnlz_roots.finalize_now) {

    if (!GC_finalize_on_demand) {
      unsigned char *pnested = GC_check_finalizer_nested();

      /* Skip GC_invoke_finalizers() if nested */
      if (pnested != NULL) {
        (void) GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers. */
          GC_ASSERT(NULL == GC_fnlz_roots.finalize_now);
#       endif   /* Otherwise GC can run concurrently and add more */

    /* These variables require synchronization to avoid data races. */
    if (last_finalizer_notification != GC_gc_no) {
        last_finalizer_notification = GC_gc_no;
        notifier_fn = GC_finalizer_notifier;
    if (notifier_fn != 0)
        (*notifier_fn)(); /* Invoke the notifier */
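
/* A typical on-demand setup from client code (illustrative; my_notifier   */
/* is an invented name).  With GC_set_finalize_on_demand(1) the collector  */
/* does not run finalizers itself; it only calls the notifier when a       */
/* collection leaves ready finalizers on the queue, and the client         */
/* decides when to drain that queue:                                       */
/*                                                                         */
/*   static void my_notifier(void)                                         */
/*   {                                                                     */
/*     ... wake the thread that runs finalizers ...                        */
/*   }                                                                     */
/*                                                                         */
/*   GC_set_finalize_on_demand(1);                                         */
/*   GC_set_finalizer_notifier(my_notifier);                               */
/*                                                                         */
/*   ... and in the finalizer thread:                                      */
/*   while (GC_should_invoke_finalizers())                                 */
/*     (void)GC_invoke_finalizers();                                       */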
#ifndef SMALL_CONFIG
# ifndef GC_LONG_REFS_NOT_NEEDED
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
# else
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)
# endif

  GC_INNER void GC_print_finalization_stats(void)
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));
    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))

    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
#endif /* !SMALL_CONFIG */

#endif /* !GC_NO_FINALIZATION */