 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (C) 2007 Free Software Foundation, Inc
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
#include "private/gc_pmark.h"

#ifndef GC_NO_FINALIZATION
# include "javaxfc.h" /* to get GC_finalize_all() as extern "C" */
/* Type of mark procedure used for marking from finalizable object.    */
/* This procedure normally does not mark the object, only its          */
/* descendants.                                                         */
typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
#define HASH3(addr,size,log_size) \
        ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
         & ((size) - 1))
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
struct hash_chain_entry {
    word hidden_key;            /* Pointer disguised with GC_HIDE_POINTER. */
    struct hash_chain_entry * next;
};

struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared.         */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x, y) \
                (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
    word dl_hidden_obj;         /* Pointer to object base       */
};

struct dl_hashtbl_s {
    struct disappearing_link **head;
    signed_word log_size;
    word entries;
};

STATIC struct dl_hashtbl_s GC_dl_hashtbl = {
    /* head */ NULL, /* log_size */ -1, /* entries */ 0 };
#ifndef GC_LONG_REFS_NOT_NEEDED
  STATIC struct dl_hashtbl_s GC_ll_hashtbl = { NULL, -1, 0 };
#endif
struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                                /* Pointer to object base.      */
                                /* No longer hidden once object */
                                /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
    GC_finalization_proc fo_fn; /* Finalizer.                   */
    ptr_t fo_client_data;
    word fo_object_size;        /* In bytes.                    */
    finalization_mark_proc fo_mark_proc;        /* Mark-through procedure */
};
static signed_word log_fo_table_size = -1;

STATIC struct fnlz_roots_s {
  struct finalizable_object **fo_head;
  /* List of objects that should be finalized now: */
  struct finalizable_object *finalize_now;
} GC_fnlz_roots = { NULL, NULL };
  /* Update finalize_now atomically as GC_should_invoke_finalizers does */
  /* not acquire the allocation lock.                                   */
# define SET_FINALIZE_NOW(fo) \
            AO_store((volatile AO_t *)&GC_fnlz_roots.finalize_now, (AO_t)(fo))
# define SET_FINALIZE_NOW(fo) (void)(GC_fnlz_roots.finalize_now = (fo))
GC_API void GC_CALL GC_push_finalizer_structures(void)
  GC_ASSERT((word)(&GC_dl_hashtbl.head) % sizeof(word) == 0);
  GC_ASSERT((word)(&GC_fnlz_roots) % sizeof(word) == 0);
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_ASSERT((word)(&GC_ll_hashtbl.head) % sizeof(word) == 0);
    GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
  GC_PUSH_ALL_SYM(GC_fnlz_roots);
/* Threshold of log_size to initiate full collection before growing    */
/* a hash table.                                                        */
#ifndef GC_ON_GROW_LOG_SIZE_MIN
# define GC_ON_GROW_LOG_SIZE_MIN CPP_LOG_HBLKSIZE
/* Double the size of a hash table.  *log_size_ptr is the log of its   */
/* current size.  May be a no-op.                                      */
/* *table is a pointer to an array of hash headers.  If we succeed, we */
/* update both *table and *log_size_ptr.  Lock is held.                */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr, word *entries_ptr)
    struct hash_chain_entry *p;
    signed_word log_old_size = *log_size_ptr;
    signed_word log_new_size = log_old_size + 1;
    word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    /* Avoid growing the table if at least 25% of the entries can be   */
    /* deleted by forcing a collection.  Ignored for small tables.     */
    if (log_old_size >= GC_ON_GROW_LOG_SIZE_MIN) {
      IF_CANCEL(int cancel_state;)

      DISABLE_CANCEL(cancel_state);
      (void)GC_try_to_collect_inner(GC_never_stop_func);
      RESTORE_CANCEL(cancel_state);
      /* GC_finalize might decrease the number of entries.     */
      if (*entries_ptr < ((word)1 << log_old_size) - (*entries_ptr >> 2))
    new_table = (struct hash_chain_entry **)
        GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                (size_t)new_size * sizeof(struct hash_chain_entry *),
    if (new_table == 0) {
        ABORT("Insufficient space for initial table allocation");
    for (i = 0; i < old_size; i++) {
        ptr_t real_key = (ptr_t)GC_REVEAL_POINTER(p->hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
    *log_size_ptr = log_new_size;
    GC_dirty(new_table); /* entire object */
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
    base = (ptr_t)GC_base(link);
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
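
/*
 * A hedged, client-side sketch (not part of the collector itself) of how the
 * disappearing-link API above is typically used: the client registers the
 * address of a pointer slot, and the collector clears that slot once the
 * referenced object is found unreachable.  The helper name make_weak_ref is
 * hypothetical; the GC_ calls are the public gc.h interface.
 *
 *   #include <gc.h>
 *
 *   void **make_weak_ref(void *obj)
 *   {
 *     // The slot lives in pointer-free (atomic) GC memory, so the pointer
 *     // stored in it does not itself keep obj alive.
 *     void **slot = (void **)GC_MALLOC_ATOMIC(sizeof(void *));
 *
 *     *slot = obj;
 *     // Ask the collector to clear *slot when obj becomes unreachable.
 *     (void)GC_general_register_disappearing_link(slot, obj);
 *     return slot;
 *   }
 *
 * GC_general_register_disappearing_link() returns GC_DUPLICATE if the same
 * slot is registered twice and GC_NO_MEMORY if the entry cannot be added.
 * Reading *slot while a collection may be running concurrently generally
 * requires the allocator lock (e.g. via GC_call_with_alloc_lock()).
 */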
STATIC int GC_register_disappearing_link_inner(
                        struct dl_hashtbl_s *dl_hashtbl, void **link,
                        const void *obj, const char *tbl_log_name)
    struct disappearing_link *curr_dl;
    struct disappearing_link * new_dl;

    if (EXPECT(GC_find_leak, FALSE)) return GC_UNIMPLEMENTED;

    GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
    if (dl_hashtbl -> log_size == -1
        || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
      GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
                    &dl_hashtbl -> log_size, &dl_hashtbl -> entries);
      if (dl_hashtbl->log_size < 0) ABORT("log_size is negative");
      GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
                         1 << (unsigned)dl_hashtbl -> log_size);
    index = HASH2(link, dl_hashtbl -> log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
        curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
      GC_oom_func oom_fn = GC_oom_fn;
      new_dl = (struct disappearing_link *)
                (*oom_fn)(sizeof(struct disappearing_link));
      /* It's not likely we'll make it here, but ... */
      /* Recalculate index since the table may grow.         */
      index = HASH2(link, dl_hashtbl -> log_size);
      /* Check again that our disappearing link is not in the table. */
      for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
          curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
#         ifndef DBG_HDRS_ALL
            /* Free unused new_dl returned by GC_oom_fn() */
            GC_free((void *)new_dl);

    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_hashtbl -> head[index]);
    dl_hashtbl -> head[index] = new_dl;
    dl_hashtbl -> entries++;
    GC_dirty(dl_hashtbl->head + index);
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_general_register_disappearing_link");
    return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,

# define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
# define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
/* Unregisters given link and returns the link entry to free.  */
GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl, void **link)
    struct disappearing_link *curr_dl;
    struct disappearing_link *prev_dl = NULL;

    GC_ASSERT(I_HOLD_LOCK());
    if (dl_hashtbl->log_size == -1)
        return NULL; /* prevent integer shift by a negative amount */

    index = HASH2(link, dl_hashtbl->log_size);
    for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Remove found entry from the table. */
            if (NULL == prev_dl) {
                dl_hashtbl -> head[index] = dl_next(curr_dl);
                GC_dirty(dl_hashtbl->head + index);
                dl_set_next(prev_dl, dl_next(curr_dl));
            dl_hashtbl -> entries--;
GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
    struct disappearing_link *curr_dl;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);

    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
/* Toggle-ref support.  */
#ifndef GC_TOGGLE_REFS_NOT_NEEDED
  typedef union {
    /* Lowest bit is used to distinguish between choices.       */
    void *strong_ref;
    GC_hidden_pointer weak_ref;
  } GCToggleRef;

  STATIC GC_toggleref_func GC_toggleref_callback = 0;
  STATIC GCToggleRef *GC_toggleref_arr = NULL;
  STATIC int GC_toggleref_array_size = 0;
  STATIC int GC_toggleref_array_capacity = 0;
GC_INNER void GC_process_togglerefs(void)
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        needs_barrier = TRUE;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
      default:
        ABORT("Bad toggle-ref status returned by callback");

    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    GC_dirty(GC_toggleref_arr); /* entire object */
STATIC void GC_normal_finalize_mark_proc(ptr_t);

static void push_and_mark_object(void *p)
    GC_normal_finalize_mark_proc((ptr_t)p);
    while (!GC_mark_stack_empty()) {
      MARK_FROM_MARK_STACK();
    if (GC_mark_state != MS_NONE) {
      while (!GC_mark_some(0)) {
STATIC void GC_mark_togglerefs(void)
    if (NULL == GC_toggleref_arr)

    /* TODO: Hide GC_toggleref_arr to avoid its marking from roots. */
    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      if (obj != NULL && ((word)obj & 1) == 0) {
        push_and_mark_object(obj);
STATIC void GC_clear_togglerefs(void)
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
        if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
          GC_toggleref_arr[i].weak_ref = 0;
          /* No need to copy, BDWGC is a non-moving collector.    */
GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
    GC_toggleref_callback = fn;

GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
    GC_toggleref_func fn;

    fn = GC_toggleref_callback;
static GC_bool ensure_toggleref_capacity(int capacity_inc)
    GC_ASSERT(capacity_inc >= 0);
    GC_ASSERT(I_HOLD_LOCK());
    if (NULL == GC_toggleref_arr) {
      GC_toggleref_array_capacity = 32; /* initial capacity */
      GC_toggleref_arr = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
      if (NULL == GC_toggleref_arr)
    if ((unsigned)GC_toggleref_array_size + (unsigned)capacity_inc
        >= (unsigned)GC_toggleref_array_capacity) {
      GCToggleRef *new_array;
      while ((unsigned)GC_toggleref_array_capacity
              < (unsigned)GC_toggleref_array_size + (unsigned)capacity_inc) {
        GC_toggleref_array_capacity *= 2;
        if (GC_toggleref_array_capacity < 0) /* overflow */

      new_array = (GCToggleRef *)GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        GC_toggleref_array_capacity * sizeof(GCToggleRef),
      if (NULL == new_array)
      if (EXPECT(GC_toggleref_array_size > 0, TRUE))
        BCOPY(GC_toggleref_arr, new_array,
              GC_toggleref_array_size * sizeof(GCToggleRef));
      GC_INTERNAL_FREE(GC_toggleref_arr);
      GC_toggleref_arr = new_array;
GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
    int res = GC_SUCCESS;

    GC_ASSERT(NONNULL_ARG_NOT_NULL(obj));
    if (GC_toggleref_callback != 0) {
      if (!ensure_toggleref_capacity(1)) {
        GC_toggleref_arr[GC_toggleref_array_size].strong_ref =
                        is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
          GC_dirty(GC_toggleref_arr + GC_toggleref_array_size);
        GC_toggleref_array_size++;

#endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
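
/*
 * A hedged sketch of how the toggle-ref machinery above is typically driven
 * by client code (for instance a language binding that also keeps its own
 * reference counts).  The wrapper type and the decision logic are
 * hypothetical; GC_set_toggleref_func, GC_toggleref_add, GC_CALLBACK and the
 * GC_TOGGLE_REF_* status values come from the public gc.h interface.
 *
 *   #include <gc.h>
 *
 *   struct wrapper { int external_refcount; int still_wanted; };
 *
 *   static GC_ToggleRefStatus GC_CALLBACK toggle_cb(void *obj)
 *   {
 *     struct wrapper *w = (struct wrapper *)obj;
 *
 *     if (w->external_refcount > 0) return GC_TOGGLE_REF_STRONG;
 *     if (w->still_wanted)          return GC_TOGGLE_REF_WEAK;
 *     return GC_TOGGLE_REF_DROP;
 *   }
 *
 *   // At startup, then once per wrapped object:
 *   GC_set_toggleref_func(toggle_cb);
 *   GC_toggleref_add(w, 1);   // initially held strongly
 *
 * At collection time GC_process_togglerefs() above asks the callback, for
 * each registered object, whether it should stay strongly referenced, become
 * a weak (hidden) reference, or be dropped from the array.
 */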
/* Finalizer callback support. */
STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;

GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
    GC_object_finalized_proc = fn;

GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
    GC_await_finalize_proc fn;

    fn = GC_object_finalized_proc;
#ifndef GC_LONG_REFS_NOT_NEEDED
  GC_API int GC_CALL GC_register_long_link(void * * link, const void * obj)
    if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
        ABORT("Bad arg to GC_register_long_link");
    return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,

  GC_API int GC_CALL GC_unregister_long_link(void * * link)
    struct disappearing_link *curr_dl;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);

    if (NULL == curr_dl) return 0;
    FREE_DL_ENTRY(curr_dl);
#endif /* !GC_LONG_REFS_NOT_NEEDED */
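
/*
 * Hedged usage note for the long-link API above: a "long" link behaves like
 * a disappearing link, but its hash table is processed after the
 * finalization-related marking in GC_finalize() below, so the link is
 * cleared only once the object is unreachable even from finalizable
 * objects.  The helper name make_long_weak_ref is hypothetical.
 *
 *   void **make_long_weak_ref(void *obj)
 *   {
 *     void **slot = (void **)GC_MALLOC_ATOMIC(sizeof(void *));
 *
 *     *slot = obj;
 *     (void)GC_register_long_link(slot, obj);
 *     return slot;
 *   }
 */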
#ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
  /* Moves a link.  Assume the lock is held.    */
  STATIC int GC_move_disappearing_link_inner(
                                struct dl_hashtbl_s *dl_hashtbl,
                                void **link, void **new_link)
    struct disappearing_link *curr_dl, *prev_dl, *new_dl;
    size_t curr_index, new_index;
    word curr_hidden_link;
    word new_hidden_link;

    GC_ASSERT(I_HOLD_LOCK());
    if (dl_hashtbl->log_size == -1)
      return GC_NOT_FOUND; /* prevent integer shift by a negative amount */
    /* Find current link.       */
    curr_index = HASH2(link, dl_hashtbl -> log_size);
    curr_hidden_link = GC_HIDE_POINTER(link);
    for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
         curr_dl = dl_next(curr_dl)) {
      if (curr_dl -> dl_hidden_link == curr_hidden_link)
    if (NULL == curr_dl) {
    if (link == new_link) {
      return GC_SUCCESS; /* Nothing to do.      */
    /* Link found; now check that new_link is not present.      */
    new_index = HASH2(new_link, dl_hashtbl -> log_size);
    new_hidden_link = GC_HIDE_POINTER(new_link);
    for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
         new_dl = dl_next(new_dl)) {
      if (new_dl -> dl_hidden_link == new_hidden_link) {
        /* Target already registered; bail.     */

    /* Remove from old, add to new, update link.        */
    if (NULL == prev_dl) {
      dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
      dl_set_next(prev_dl, dl_next(curr_dl));
    curr_dl -> dl_hidden_link = new_hidden_link;
    dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
    dl_hashtbl -> head[new_index] = curr_dl;
    GC_dirty(dl_hashtbl->head); /* entire object */
  GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
    if (((word)new_link & (ALIGNMENT-1)) != 0
        || !NONNULL_ARG_NOT_NULL(new_link))
      ABORT("Bad new_link arg to GC_move_disappearing_link");
    if (((word)link & (ALIGNMENT-1)) != 0)
      return GC_NOT_FOUND; /* Nothing to do. */

    result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);

# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
      if (((word)new_link & (ALIGNMENT-1)) != 0
          || !NONNULL_ARG_NOT_NULL(new_link))
        ABORT("Bad new_link arg to GC_move_long_link");
      if (((word)link & (ALIGNMENT-1)) != 0)
        return GC_NOT_FOUND; /* Nothing to do. */

      result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
# endif /* !GC_LONG_REFS_NOT_NEEDED */
#endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
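
/*
 * Hedged sketch of when the moving API above matters: if client code copies
 * or reallocates a structure that contains a registered link slot, the
 * registration must follow the slot to its new address.  The old_place and
 * new_place names and the weak_slot field are hypothetical.
 *
 *   new_place->weak_slot = old_place->weak_slot;
 *   if (GC_move_disappearing_link(&old_place->weak_slot,
 *                                 &new_place->weak_slot) != GC_SUCCESS) {
 *     // GC_NOT_FOUND: the old slot was never registered;
 *     // GC_DUPLICATE: the new slot is already registered.
 *   }
 *
 * GC_move_long_link() does the same for links registered with
 * GC_register_long_link().
 */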
/* Possible finalization_marker procedures.  Note that mark stack      */
/* overflow is handled by the caller, and is not a disaster.           */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
    GC_mark_stack_top = GC_push_obj(p, HDR(p), GC_mark_stack_top,
                                    GC_mark_stack + GC_mark_stack_size);

/* This only pays very partial attention to the mark descriptor.       */
/* It does the right thing for normal and atomic objects, and treats   */
/* most others as normal.                                               */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
    word descr = hhdr -> hb_descr;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
       scan_limit = p + descr - sizeof(word);
       scan_limit = target_limit + 1 - sizeof(word);
    for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
        if (r < (word)p || r > (word)target_limit) {
            GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);

STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}
/* GC_unreachable_finalize_mark_proc is an alias for normal marking,   */
/* but it is explicitly tested for, and triggers different             */
/* behavior.  Objects registered in this way are not finalized         */
/* if they are reachable by other finalizable objects, even if those   */
/* other objects specify no ordering.                                  */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
    GC_normal_finalize_mark_proc(p);
/* Register a finalization function.  See gc.h for details.    */
/* The last parameter is a procedure that determines           */
/* marking for finalization ordering.  Any objects marked      */
/* by that procedure will be guaranteed to not have been       */
/* finalized when this finalizer is invoked.                   */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
    struct finalizable_object * curr_fo;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */

    if (EXPECT(GC_find_leak, FALSE)) return;
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
      GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                    &log_fo_table_size, &GC_fo_entries);
      if (log_fo_table_size < 0) ABORT("log_size is negative");
      GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                         1 << (unsigned)log_fo_table_size);
    /* in the THREADS case we hold allocation lock.             */
      struct finalizable_object *prev_fo = NULL;

      index = HASH2(obj, log_fo_table_size);
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for obj.      */
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
            fo_set_next(prev_fo, fo_next(curr_fo));
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                        */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
              GC_fnlz_roots.fo_head[index] = curr_fo;
              fo_set_next(prev_fo, curr_fo);
            GC_dirty(GC_fnlz_roots.fo_head + index);
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
        curr_fo = fo_next(curr_fo);
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(). */
        if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
        /* Not enough memory.  *ocd and *ofn remain unchanged.  */
      /* It's not likely we'll make it here, but ... */
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fnlz_roots.fo_head[index] = new_fo;
    GC_dirty(GC_fnlz_roots.fo_head + index);
GC_API void GC_CALL GC_register_finalizer(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);

GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);

GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);

static GC_bool need_unreachable_finalization = FALSE;
        /* Avoid the work if this isn't used.   */

GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                               GC_finalization_proc fn, void * cd,
                               GC_finalization_proc *ofn, void ** ocd)
    need_unreachable_finalization = TRUE;
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
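
/*
 * A hedged, client-side example of the registration calls above.  The
 * file_handle type and close_file finalizer are hypothetical; GC_NEW,
 * GC_CALLBACK and GC_register_finalizer come from gc.h.
 *
 *   #include <fcntl.h>
 *   #include <unistd.h>
 *   #include <gc.h>
 *
 *   struct file_handle { int fd; };
 *
 *   static void GC_CALLBACK close_file(void *obj, void *client_data)
 *   {
 *     (void)client_data;
 *     close(((struct file_handle *)obj)->fd);
 *   }
 *
 *   struct file_handle *open_handle(const char *path)
 *   {
 *     struct file_handle *h = GC_NEW(struct file_handle);
 *
 *     h->fd = open(path, O_RDONLY);
 *     // Passing NULL for ofn/ocd discards any previously registered
 *     // finalizer instead of returning it.
 *     GC_register_finalizer(h, close_file, NULL, NULL, NULL);
 *     return h;
 *   }
 *
 * GC_register_finalizer() imposes ordering: objects reachable from h are not
 * finalized in the same cycle as h.  The _no_order variant drops that
 * guarantee (useful when finalizable objects may form cycles), and the
 * _ignore_self variant ignores the object's pointers to itself.
 */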
  STATIC void GC_dump_finalization_links(
                                const struct dl_hashtbl_s *dl_hashtbl)
    size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
                                (size_t)1 << dl_hashtbl->log_size;

    for (i = 0; i < dl_size; i++) {
      struct disappearing_link *curr_dl;

      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_obj);
        ptr_t real_link = (ptr_t)GC_REVEAL_POINTER(curr_dl->dl_hidden_link);

        GC_printf("Object: %p, link: %p\n",
                  (void *)real_ptr, (void *)real_link);
  GC_API void GC_CALL GC_dump_finalization(void)
    struct finalizable_object * curr_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

        GC_printf("Finalizable object: %p\n", (void *)real_ptr);
#endif /* !NO_DEBUGGING */
  STATIC word GC_old_dl_entries = 0; /* for stats printing */
# ifndef GC_LONG_REFS_NOT_NEEDED
    STATIC word GC_old_ll_entries = 0;
#endif /* !SMALL_CONFIG */
  /* Global variables to minimize the level of recursion when a client */
  /* finalizer allocates memory.                                        */
  STATIC int GC_finalizer_nested = 0;
                        /* Only the lowest byte is used, the rest is   */
                        /* padding for proper global data alignment    */
                        /* required for some compilers (like Watcom).  */
  STATIC unsigned GC_finalizer_skipped = 0;
  /* Checks and updates the level of finalizer recursion.              */
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
  /* collector (to minimize the risk of deep finalizer recursion),     */
  /* otherwise returns a pointer to GC_finalizer_nested.               */
  STATIC unsigned char *GC_check_finalizer_nested(void)
    unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level.            */
      if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
      GC_finalizer_skipped = 0;
    *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
    return (unsigned char *)&GC_finalizer_nested;
GC_INLINE void GC_make_disappearing_links_disappear(
                                        struct dl_hashtbl_s* dl_hashtbl,
                                        GC_bool is_remove_dangling)
  size_t dl_size = dl_hashtbl->log_size == -1 ? 0
                        : (size_t)1 << dl_hashtbl->log_size;
  GC_bool needs_barrier = FALSE;

  GC_ASSERT(I_HOLD_LOCK());
  for (i = 0; i < dl_size; i++) {
    struct disappearing_link *curr_dl, *next_dl;
    struct disappearing_link *prev_dl = NULL;

    for (curr_dl = dl_hashtbl->head[i]; curr_dl != NULL; curr_dl = next_dl) {
      next_dl = dl_next(curr_dl);
      if (is_remove_dangling) {
        ptr_t real_link = (ptr_t)GC_base(GC_REVEAL_POINTER(
                                                curr_dl->dl_hidden_link));

        if (NULL == real_link || EXPECT(GC_is_marked(real_link), TRUE)) {
        if (EXPECT(GC_is_marked((ptr_t)GC_REVEAL_POINTER(
                                        curr_dl->dl_hidden_obj)), TRUE)) {
        *(ptr_t *)GC_REVEAL_POINTER(curr_dl->dl_hidden_link) = NULL;

      /* Delete curr_dl entry from dl_hashtbl.  */
      if (NULL == prev_dl) {
        dl_hashtbl -> head[i] = next_dl;
        needs_barrier = TRUE;
        dl_set_next(prev_dl, next_dl);
      GC_clear_mark_bit(curr_dl);
      dl_hashtbl -> entries--;
    GC_dirty(dl_hashtbl -> head); /* entire object */
/* Called with held lock (but the world is running).                   */
/* Cause disappearing links to disappear and unreachable objects to be */
/* enqueued for finalization.                                          */
GC_INNER void GC_finalize(void)
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;
    GC_bool needs_barrier = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, FALSE);

    /* Mark all objects reachable via chains of 1 or more pointers     */
    /* from finalizable objects.                                        */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
    /* Enqueue for finalization all objects that are still             */
    /* unreachable.                                                     */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      while (curr_fo != 0) {
        real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
                if (GC_object_finalized_proc) {
                  GC_dirty(GC_fnlz_roots.fo_head + i);
                  needs_barrier = TRUE;
                fo_set_next(prev_fo, next_fo);
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              SET_FINALIZE_NOW(curr_fo);
              /* unhide object pointer so any future collections will  */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = fo_next(curr_fo);
  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
#       if defined(GC_ASSERTIONS) || defined(LINT2)
          if (curr_fo != NULL && log_fo_table_size < 0)
            ABORT("log_size is negative");
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
              if (NULL == prev_fo) {
                SET_FINALIZE_NOW(next_fo);
                fo_set_next(prev_fo, next_fo);
              curr_fo -> fo_hidden_base =
                        GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fnlz_roots.fo_head[i] = curr_fo;
              needs_barrier = TRUE;
    GC_dirty(GC_fnlz_roots.fo_head); /* entire object */
    /* Remove dangling disappearing links. */
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl, TRUE);

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_clear_togglerefs();
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_make_disappearing_links_disappear(&GC_ll_hashtbl, FALSE);
      GC_make_disappearing_links_disappear(&GC_ll_hashtbl, TRUE);

    if (GC_fail_count) {
      /* Don't prevent running finalizers if there has been an allocation */
      /* failure recently.                                                 */
        GC_reset_finalizer_nested();
        GC_finalizer_nested = 0;
#ifndef JAVA_FINALIZATION_NOT_NEEDED

  /* Enqueue all remaining finalizers to be run.        */
  STATIC void GC_enqueue_all_finalizers(void)
    struct finalizable_object * next_fo;

    GC_ASSERT(I_HOLD_LOCK());
    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      struct finalizable_object * curr_fo = GC_fnlz_roots.fo_head[i];

      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          ptr_t real_ptr = (ptr_t)GC_REVEAL_POINTER(curr_fo->fo_hidden_base);

          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          SET_FINALIZE_NOW(curr_fo);
          /* unhide object pointer so any future collections will      */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
    GC_fo_entries = 0;  /* all entries deleted from the hash table */
  /* Invoke all remaining finalizers that haven't yet been run.
   * This is needed for strict compliance with the Java standard,
   * which can make the runtime guarantee that all finalizers are run.
   * Unfortunately, the Java standard implies we have to keep running
   * finalizers until there are no more left, a potential infinite loop.
   * Note that this is even more dangerous than the usual Java
   * finalizers, in that objects reachable from static variables
   * may have been finalized when these finalizers are run.
   * Finalizers run at this point must be prepared to deal with a
   * mostly broken world.
   * This routine is externally callable, so is called without
   * the allocation lock.
   */
  GC_API void GC_CALL GC_finalize_all(void)
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good  */
      /* idea when we should be notifying another thread to run them.  */
      /* But otherwise we don't have a great way to wait for them to   */
      /* run.                                                          */

#endif /* !JAVA_FINALIZATION_NOT_NEEDED */
/* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
/* finalizers can only be called from some kind of "safe state" and     */
/* getting into that safe state is expensive.)                           */
GC_API int GC_CALL GC_should_invoke_finalizers(void)
# ifdef AO_HAVE_load
    return AO_load((volatile AO_t *)&GC_fnlz_roots.finalize_now) != 0;
    return GC_fnlz_roots.finalize_now != NULL;
# endif /* !THREADS */
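
/*
 * Hedged sketch of the intended polling pattern when the client runs
 * finalizers itself rather than letting the allocator trigger them: check
 * the cheap predicate above at a convenient "safe" point and only then pay
 * for GC_invoke_finalizers().  Both GC_set_finalize_on_demand() and
 * GC_invoke_finalizers() are public gc.h calls.
 *
 *   GC_set_finalize_on_demand(1);    // typically set once, near GC_INIT()
 *
 *   // Later, at a point where running arbitrary client code is safe:
 *   if (GC_should_invoke_finalizers())
 *     (void)GC_invoke_finalizers();  // returns the number of finalizers run
 */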
/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.                           */
GC_API int GC_CALL GC_invoke_finalizers(void)
    word bytes_freed_before = 0; /* initialized to prevent warning. */

    while (GC_should_invoke_finalizers()) {
        struct finalizable_object * curr_fo;

            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        curr_fo = GC_fnlz_roots.finalize_now;
            if (curr_fo != NULL)
                SET_FINALIZE_NOW(fo_next(curr_fo));
            if (curr_fo == 0) break;
            GC_fnlz_roots.finalize_now = fo_next(curr_fo);
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        /* Explicit freeing of curr_fo is probably a bad idea.  */
        /* It throws off accounting if nearly all objects are   */
        /* finalizable.  Otherwise it should not matter.        */
    /* bytes_freed_before is initialized whenever count != 0 */
#         if defined(THREADS) && !defined(THREAD_SANITIZER)
            /* A quick check whether some memory was freed.     */
            /* The race with GC_free() can safely be ignored    */
            /* because we only need to know if the current      */
            /* thread has deallocated something.                */
            && bytes_freed_before != GC_bytes_freed
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
static word last_finalizer_notification = 0;

GC_INNER void GC_notify_or_invoke_finalizers(void)
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue.  */
      if (!GC_should_invoke_finalizers())
    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          /* Stops when GC_gc_no wraps; that's OK.      */
          last_back_trace_gc_no = (word)(-1);  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              GC_generate_random_backtrace_no_gc();
          last_back_trace_gc_no = GC_gc_no;
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            GC_print_back_graph_stats();
    if (NULL == GC_fnlz_roots.finalize_now) {

    if (!GC_finalize_on_demand) {
      unsigned char *pnested = GC_check_finalizer_nested();

      /* Skip GC_invoke_finalizers() if nested */
      if (pnested != NULL) {
        (void) GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers. */
          GC_ASSERT(NULL == GC_fnlz_roots.finalize_now);
#       endif   /* Otherwise GC can run concurrently and add more */

    /* These variables require synchronization to avoid data races.    */
    if (last_finalizer_notification != GC_gc_no) {
      last_finalizer_notification = GC_gc_no;
      notifier_fn = GC_finalizer_notifier;
    if (notifier_fn != 0)
      (*notifier_fn)(); /* Invoke the notifier */
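
/*
 * Hedged example of the notification path used just above: instead of
 * running finalizers in whichever thread happened to trigger a collection,
 * a client may install a notifier that wakes a dedicated thread.  The
 * semaphore and thread function below are hypothetical; GC_CALLBACK,
 * GC_set_finalize_on_demand and GC_set_finalizer_notifier come from gc.h.
 *
 *   #include <semaphore.h>
 *   #include <gc.h>
 *
 *   static sem_t finalizer_work;
 *
 *   static void GC_CALLBACK notify_finalizers(void)
 *   {
 *     sem_post(&finalizer_work);       // wake the finalizer thread
 *   }
 *
 *   static void *finalizer_thread(void *arg)
 *   {
 *     (void)arg;
 *     for (;;) {
 *       sem_wait(&finalizer_work);
 *       while (GC_invoke_finalizers() > 0) {}
 *     }
 *     return NULL;
 *   }
 *
 *   // At startup: sem_init(&finalizer_work, 0, 0), create the thread
 *   // (with GC_pthread_create() or the GC_THREADS redirection), then:
 *   GC_set_finalize_on_demand(1);
 *   GC_set_finalizer_notifier(notify_finalizers);
 */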
#ifndef SMALL_CONFIG
# ifndef GC_LONG_REFS_NOT_NEEDED
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
#   define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)

  GC_INNER void GC_print_finalization_stats(void)
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));
    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
#endif /* !SMALL_CONFIG */

#endif /* !GC_NO_FINALIZATION */