2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
4 * Copyright (C) 2007 Free Software Foundation, Inc.
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
17 #include "private/gc_pmark.h"
19 #ifndef GC_NO_FINALIZATION
21 /* Type of mark procedure used for marking from a finalizable object. */
22 /* This procedure normally does not mark the object, only its descendants. */
24 typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
26 #define HASH3(addr,size,log_size) \
27 ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
28 & ((size) - 1))
29 #define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
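/*
 * Illustrative note: with log_size = 4 the table has 16 buckets, and
 * a link address a lands in bucket ((a >> 3) ^ (a >> 7)) & 15, i.e.
 * two right-shifted copies of the address are XORed and the result is
 * masked down to the table size.  The shift by 3 discards low-order
 * bits that are typically zero for aligned addresses.
 */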
31 struct hash_chain_entry {
33 struct hash_chain_entry * next;
36 struct disappearing_link {
37 struct hash_chain_entry prolog;
38 # define dl_hidden_link prolog.hidden_key
39 /* Field to be cleared. */
40 # define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
41 # define dl_set_next(x, y) \
42 (void)((x)->prolog.next = (struct hash_chain_entry *)(y))
43 word dl_hidden_obj; /* Pointer to object base */
47 struct disappearing_link **head;
52 STATIC struct dl_hashtbl_s GC_dl_hashtbl = {
53 /* head */ NULL, /* log_size */ -1, /* entries */ 0 };
54 #ifndef GC_LONG_REFS_NOT_NEEDED
55 STATIC struct dl_hashtbl_s GC_ll_hashtbl = { NULL, -1, 0 };
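/*
 * Illustrative note on the hidden-pointer convention used by these
 * tables (assuming the usual gc.h definitions, in which
 * GC_HIDE_POINTER is a bitwise complement and GC_REVEAL_POINTER
 * undoes it):
 *
 *   GC_hidden_pointer h = GC_HIDE_POINTER(obj);    stored as ~obj
 *   void *p = GC_REVEAL_POINTER(h);                p == obj again
 *
 * Because the stored words are not valid pointers, the tables can be
 * traced like ordinary data (see GC_push_finalizer_structures below)
 * without that tracing keeping the registered objects or link
 * locations reachable.
 */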
58 struct finalizable_object {
59 struct hash_chain_entry prolog;
60 # define fo_hidden_base prolog.hidden_key
61 /* Pointer to object base. */
62 /* No longer hidden once object */
63 /* is on finalize_now queue. */
64 # define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
65 # define fo_set_next(x,y) ((x)->prolog.next = (struct hash_chain_entry *)(y))
66 GC_finalization_proc fo_fn; /* Finalizer. */
68 word fo_object_size; /* In bytes. */
69 finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
72 static signed_word log_fo_table_size = -1;
75 struct finalizable_object **fo_head;
76 /* List of objects that should be finalized now: */
77 struct finalizable_object *finalize_now;
78 } GC_fnlz_roots = { NULL, NULL };
80 GC_API void GC_CALL GC_push_finalizer_structures(void)
82 GC_ASSERT((word)&GC_dl_hashtbl.head % sizeof(word) == 0);
83 GC_ASSERT((word)&GC_fnlz_roots % sizeof(word) == 0);
84 # ifndef GC_LONG_REFS_NOT_NEEDED
85 GC_ASSERT((word)&GC_ll_hashtbl.head % sizeof(word) == 0);
86 GC_PUSH_ALL_SYM(GC_ll_hashtbl.head);
88 GC_PUSH_ALL_SYM(GC_dl_hashtbl.head);
89 GC_PUSH_ALL_SYM(GC_fnlz_roots);
92 /* Double the size of a hash table. *log_size_ptr is the log of its current */
93 /* size. May be a no-op. */
94 /* *table is a pointer to an array of hash headers. If we succeed, we */
95 /* update both *table and *log_size_ptr. Lock is held. */
96 STATIC void GC_grow_table(struct hash_chain_entry ***table,
97 signed_word *log_size_ptr)
100 register struct hash_chain_entry *p;
101 signed_word log_old_size = *log_size_ptr;
102 signed_word log_new_size = log_old_size + 1;
103 word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
104 word new_size = (word)1 << log_new_size;
105 /* FIXME: Power of 2 size often gets rounded up to one more page. */
106 struct hash_chain_entry **new_table;
108 GC_ASSERT(I_HOLD_LOCK());
109 new_table = (struct hash_chain_entry **)
110 GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
111 (size_t)new_size * sizeof(struct hash_chain_entry *),
113 if (new_table == 0) {
115 ABORT("Insufficient space for initial table allocation");
120 for (i = 0; i < old_size; i++) {
123 ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
124 struct hash_chain_entry *next = p -> next;
125 size_t new_hash = HASH3(real_key, new_size, log_new_size);
127 p -> next = new_table[new_hash];
128 new_table[new_hash] = p;
132 *log_size_ptr = log_new_size;
136 GC_API int GC_CALL GC_register_disappearing_link(void * * link)
140 base = (ptr_t)GC_base(link);
142 ABORT("Bad arg to GC_register_disappearing_link");
143 return(GC_general_register_disappearing_link(link, base));
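/*
 * Minimal weak-reference sketch (assuming only the public gc.h API;
 * the variable names are hypothetical).  The link location is placed
 * in pointer-free memory so that the stored pointer itself does not
 * keep the target reachable:
 *
 *   void **slot = GC_MALLOC_ATOMIC(sizeof(void *));
 *   void *target = GC_MALLOC(64);
 *   *slot = target;
 *   GC_general_register_disappearing_link(slot, target);
 *   ... once target is otherwise unreachable and a collection runs,
 *   *slot reads back as NULL.  (Safely dereferencing such a weak slot
 *   in a multi-threaded program needs extra care, e.g. doing it under
 *   GC_call_with_alloc_lock.)
 */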
146 STATIC int GC_register_disappearing_link_inner(
147 struct dl_hashtbl_s *dl_hashtbl, void **link,
148 const void *obj, const char *tbl_log_name)
150 struct disappearing_link *curr_dl;
152 struct disappearing_link * new_dl;
155 if (EXPECT(GC_find_leak, FALSE)) return GC_UNIMPLEMENTED;
157 GC_ASSERT(obj != NULL && GC_base_C(obj) == obj);
158 if (dl_hashtbl -> log_size == -1
159 || dl_hashtbl -> entries > ((word)1 << dl_hashtbl -> log_size)) {
160 GC_grow_table((struct hash_chain_entry ***)&dl_hashtbl -> head,
161 &dl_hashtbl -> log_size);
163 if (dl_hashtbl->log_size < 0) ABORT("log_size is negative");
165 GC_COND_LOG_PRINTF("Grew %s table to %u entries\n", tbl_log_name,
166 1 << (unsigned)dl_hashtbl -> log_size);
168 index = HASH2(link, dl_hashtbl -> log_size);
169 for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
170 curr_dl = dl_next(curr_dl)) {
171 if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
172 curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
177 new_dl = (struct disappearing_link *)
178 GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
180 GC_oom_func oom_fn = GC_oom_fn;
182 new_dl = (struct disappearing_link *)
183 (*oom_fn)(sizeof(struct disappearing_link));
187 /* It's not likely we'll make it here, but ... */
189 /* Recalculate index since the table may have grown. */
190 index = HASH2(link, dl_hashtbl -> log_size);
191 /* Check again that our disappearing link is not in the table. */
192 for (curr_dl = dl_hashtbl -> head[index]; curr_dl != 0;
193 curr_dl = dl_next(curr_dl)) {
194 if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
195 curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
197 # ifndef DBG_HDRS_ALL
198 /* Free unused new_dl returned by GC_oom_fn() */
199 GC_free((void *)new_dl);
205 new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
206 new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
207 dl_set_next(new_dl, dl_hashtbl -> head[index]);
208 dl_hashtbl -> head[index] = new_dl;
209 dl_hashtbl -> entries++;
214 GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
217 if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
218 ABORT("Bad arg to GC_general_register_disappearing_link");
219 return GC_register_disappearing_link_inner(&GC_dl_hashtbl, link, obj,
224 # define FREE_DL_ENTRY(curr_dl) dl_set_next(curr_dl, NULL)
226 # define FREE_DL_ENTRY(curr_dl) GC_free(curr_dl)
229 /* Unregisters the given link and returns the link entry to free. */
230 /* Assumes the lock is held. */
231 GC_INLINE struct disappearing_link *GC_unregister_disappearing_link_inner(
232 struct dl_hashtbl_s *dl_hashtbl, void **link)
234 struct disappearing_link *curr_dl;
235 struct disappearing_link *prev_dl = NULL;
238 if (dl_hashtbl->log_size == -1)
239 return NULL; /* prevent integer shift by a negative amount */
241 index = HASH2(link, dl_hashtbl->log_size);
242 for (curr_dl = dl_hashtbl -> head[index]; curr_dl;
243 curr_dl = dl_next(curr_dl)) {
244 if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
245 /* Remove found entry from the table. */
246 if (NULL == prev_dl) {
247 dl_hashtbl -> head[index] = dl_next(curr_dl);
249 dl_set_next(prev_dl, dl_next(curr_dl));
251 dl_hashtbl -> entries--;
259 GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
261 struct disappearing_link *curr_dl;
264 if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */
267 curr_dl = GC_unregister_disappearing_link_inner(&GC_dl_hashtbl, link);
269 if (NULL == curr_dl) return 0;
270 FREE_DL_ENTRY(curr_dl);
274 /* Toggle-ref support. */
275 #ifndef GC_TOGGLE_REFS_NOT_NEEDED
277 /* Lowest bit is used to distinguish between choices. */
279 GC_hidden_pointer weak_ref;
282 STATIC GC_toggleref_func GC_toggleref_callback = 0;
283 STATIC GCToggleRef *GC_toggleref_arr = NULL;
284 STATIC int GC_toggleref_array_size = 0;
285 STATIC int GC_toggleref_array_capacity = 0;
287 GC_INNER void GC_process_togglerefs(void)
292 GC_ASSERT(I_HOLD_LOCK());
293 for (i = 0; i < GC_toggleref_array_size; ++i) {
294 GCToggleRef r = GC_toggleref_arr[i];
295 void *obj = r.strong_ref;
297 if (((word)obj & 1) != 0) {
298 obj = GC_REVEAL_POINTER(r.weak_ref);
303 switch (GC_toggleref_callback(obj)) {
304 case GC_TOGGLE_REF_DROP:
306 case GC_TOGGLE_REF_STRONG:
307 GC_toggleref_arr[new_size++].strong_ref = obj;
309 case GC_TOGGLE_REF_WEAK:
310 GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
313 ABORT("Bad toggle-ref status returned by callback");
317 if (new_size < GC_toggleref_array_size) {
318 BZERO(&GC_toggleref_arr[new_size],
319 (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
320 GC_toggleref_array_size = new_size;
324 STATIC void GC_normal_finalize_mark_proc(ptr_t);
326 static void push_and_mark_object(void *p)
328 GC_normal_finalize_mark_proc(p);
329 while (!GC_mark_stack_empty()) {
330 MARK_FROM_MARK_STACK();
333 if (GC_mark_state != MS_NONE) {
334 while (!GC_mark_some(0)) {
340 STATIC void GC_mark_togglerefs(void)
343 if (NULL == GC_toggleref_arr)
346 /* TODO: Hide GC_toggleref_arr to avoid it being marked from the roots. */
347 GC_set_mark_bit(GC_toggleref_arr);
348 for (i = 0; i < GC_toggleref_array_size; ++i) {
349 void *obj = GC_toggleref_arr[i].strong_ref;
350 if (obj != NULL && ((word)obj & 1) == 0) {
351 push_and_mark_object(obj);
356 STATIC void GC_clear_togglerefs(void)
359 for (i = 0; i < GC_toggleref_array_size; ++i) {
360 if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
361 if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
362 GC_toggleref_arr[i].weak_ref = 0;
364 /* No need to copy, BDWGC is a non-moving collector. */
370 GC_API void GC_CALL GC_set_toggleref_func(GC_toggleref_func fn)
375 GC_toggleref_callback = fn;
379 GC_API GC_toggleref_func GC_CALL GC_get_toggleref_func(void)
381 GC_toggleref_func fn;
385 fn = GC_toggleref_callback;
390 static GC_bool ensure_toggleref_capacity(int capacity_inc)
392 GC_ASSERT(capacity_inc >= 0);
393 if (NULL == GC_toggleref_arr) {
394 GC_toggleref_array_capacity = 32; /* initial capacity */
395 GC_toggleref_arr = GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
396 GC_toggleref_array_capacity * sizeof(GCToggleRef),
398 if (NULL == GC_toggleref_arr)
401 if ((unsigned)GC_toggleref_array_size + (unsigned)capacity_inc
402 >= (unsigned)GC_toggleref_array_capacity) {
403 GCToggleRef *new_array;
404 while ((unsigned)GC_toggleref_array_capacity
405 < (unsigned)GC_toggleref_array_size + (unsigned)capacity_inc) {
406 GC_toggleref_array_capacity *= 2;
407 if (GC_toggleref_array_capacity < 0) /* overflow */
411 new_array = GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
412 GC_toggleref_array_capacity * sizeof(GCToggleRef),
414 if (NULL == new_array)
416 if (EXPECT(GC_toggleref_array_size > 0, TRUE))
417 BCOPY(GC_toggleref_arr, new_array,
418 GC_toggleref_array_size * sizeof(GCToggleRef));
419 GC_INTERNAL_FREE(GC_toggleref_arr);
420 GC_toggleref_arr = new_array;
425 GC_API int GC_CALL GC_toggleref_add(void *obj, int is_strong_ref)
427 int res = GC_SUCCESS;
430 GC_ASSERT(obj != NULL);
432 if (GC_toggleref_callback != 0) {
433 if (!ensure_toggleref_capacity(1)) {
436 GC_toggleref_arr[GC_toggleref_array_size++].strong_ref =
437 is_strong_ref ? obj : (void *)GC_HIDE_POINTER(obj);
443 #endif /* !GC_TOGGLE_REFS_NOT_NEEDED */
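/*
 * Minimal toggle-ref sketch (assuming the public gc.h API; the
 * callback name is hypothetical).  During GC_process_togglerefs() the
 * registered callback is asked about each object and returns one of
 * GC_TOGGLE_REF_DROP, GC_TOGGLE_REF_STRONG or GC_TOGGLE_REF_WEAK,
 * which decides whether the entry is removed, kept as a strong
 * reference, or kept only as a hidden (weak) one:
 *
 *   GC_set_toggleref_func(my_toggle_cb);
 *   GC_toggleref_add(obj, 1);     second argument nonzero means the
 *                                 entry starts as a strong reference
 */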
445 /* Finalizer callback support. */
446 STATIC GC_await_finalize_proc GC_object_finalized_proc = 0;
448 GC_API void GC_CALL GC_set_await_finalize_proc(GC_await_finalize_proc fn)
453 GC_object_finalized_proc = fn;
457 GC_API GC_await_finalize_proc GC_CALL GC_get_await_finalize_proc(void)
459 GC_await_finalize_proc fn;
463 fn = GC_object_finalized_proc;
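/*
 * Minimal usage sketch (assuming the public gc.h API; the logging
 * routine is hypothetical): the registered callback is invoked from
 * GC_finalize() for each object as it is enqueued for finalization,
 * i.e. before its finalizer actually runs.
 *
 *   static void on_finalized(void *obj) { log_object_death(obj); }
 *   ...
 *   GC_set_await_finalize_proc(on_finalized);
 */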
468 #ifndef GC_LONG_REFS_NOT_NEEDED
469 GC_API int GC_CALL GC_register_long_link(void * * link, const void * obj)
471 if (((word)link & (ALIGNMENT-1)) != 0 || !NONNULL_ARG_NOT_NULL(link))
472 ABORT("Bad arg to GC_register_long_link");
473 return GC_register_disappearing_link_inner(&GC_ll_hashtbl, link, obj,
477 GC_API int GC_CALL GC_unregister_long_link(void * * link)
479 struct disappearing_link *curr_dl;
482 if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */
485 curr_dl = GC_unregister_disappearing_link_inner(&GC_ll_hashtbl, link);
487 if (NULL == curr_dl) return 0;
488 FREE_DL_ENTRY(curr_dl);
491 #endif /* !GC_LONG_REFS_NOT_NEEDED */
493 #ifndef GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED
494 /* Moves a link. Assumes the lock is held. */
495 STATIC int GC_move_disappearing_link_inner(
496 struct dl_hashtbl_s *dl_hashtbl,
497 void **link, void **new_link)
499 struct disappearing_link *curr_dl, *prev_dl, *new_dl;
500 size_t curr_index, new_index;
501 word curr_hidden_link;
502 word new_hidden_link;
504 if (dl_hashtbl->log_size == -1)
505 return GC_NOT_FOUND; /* prevent integer shift by a negative amount */
507 /* Find current link. */
508 curr_index = HASH2(link, dl_hashtbl -> log_size);
509 curr_hidden_link = GC_HIDE_POINTER(link);
511 for (curr_dl = dl_hashtbl -> head[curr_index]; curr_dl;
512 curr_dl = dl_next(curr_dl)) {
513 if (curr_dl -> dl_hidden_link == curr_hidden_link)
518 if (NULL == curr_dl) {
522 if (link == new_link) {
523 return GC_SUCCESS; /* Nothing to do. */
526 /* Link found; now check that new_link is not already present. */
527 new_index = HASH2(new_link, dl_hashtbl -> log_size);
528 new_hidden_link = GC_HIDE_POINTER(new_link);
529 for (new_dl = dl_hashtbl -> head[new_index]; new_dl;
530 new_dl = dl_next(new_dl)) {
531 if (new_dl -> dl_hidden_link == new_hidden_link) {
532 /* Target already registered; bail. */
537 /* Remove from old, add to new, update link. */
538 if (NULL == prev_dl) {
539 dl_hashtbl -> head[curr_index] = dl_next(curr_dl);
541 dl_set_next(prev_dl, dl_next(curr_dl));
543 curr_dl -> dl_hidden_link = new_hidden_link;
544 dl_set_next(curr_dl, dl_hashtbl -> head[new_index]);
545 dl_hashtbl -> head[new_index] = curr_dl;
549 GC_API int GC_CALL GC_move_disappearing_link(void **link, void **new_link)
554 if (((word)new_link & (ALIGNMENT-1)) != 0
555 || !NONNULL_ARG_NOT_NULL(new_link))
556 ABORT("Bad new_link arg to GC_move_disappearing_link");
557 if (((word)link & (ALIGNMENT-1)) != 0)
558 return GC_NOT_FOUND; /* Nothing to do. */
561 result = GC_move_disappearing_link_inner(&GC_dl_hashtbl, link, new_link);
566 # ifndef GC_LONG_REFS_NOT_NEEDED
567 GC_API int GC_CALL GC_move_long_link(void **link, void **new_link)
572 if (((word)new_link & (ALIGNMENT-1)) != 0
573 || !NONNULL_ARG_NOT_NULL(new_link))
574 ABORT("Bad new_link arg to GC_move_long_link");
575 if (((word)link & (ALIGNMENT-1)) != 0)
576 return GC_NOT_FOUND; /* Nothing to do. */
579 result = GC_move_disappearing_link_inner(&GC_ll_hashtbl, link, new_link);
583 # endif /* !GC_LONG_REFS_NOT_NEEDED */
584 #endif /* !GC_MOVE_DISAPPEARING_LINK_NOT_NEEDED */
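/*
 * Minimal usage sketch (assuming the public gc.h API; the slot names
 * are hypothetical): if the client itself relocates the word holding
 * a registered disappearing link, the registration must follow it.
 *
 *   *new_slot = *old_slot;
 *   res = GC_move_disappearing_link(old_slot, new_slot);
 *   GC_SUCCESS:   the registration now refers to new_slot;
 *   GC_NOT_FOUND: old_slot was not registered;
 *   GC_DUPLICATE: new_slot already carries a registration.
 */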
586 /* Possible finalization_marker procedures. Note that mark stack */
587 /* overflow is handled by the caller, and is not a disaster. */
588 STATIC void GC_normal_finalize_mark_proc(ptr_t p)
592 PUSH_OBJ(p, hhdr, GC_mark_stack_top,
593 &(GC_mark_stack[GC_mark_stack_size]));
596 /* This only pays very partial attention to the mark descriptor. */
597 /* It does the right thing for normal and atomic objects, and treats */
598 /* most others as normal. */
599 STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
602 word descr = hhdr -> hb_descr;
605 ptr_t target_limit = p + hhdr -> hb_sz - 1;
607 if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
608 scan_limit = p + descr - sizeof(word);
610 scan_limit = target_limit + 1 - sizeof(word);
612 for (q = p; (word)q <= (word)scan_limit; q += ALIGNMENT) {
615 if (r < (word)p || r > (word)target_limit) {
616 GC_PUSH_ONE_HEAP(r, q, GC_mark_stack_top);
621 STATIC void GC_null_finalize_mark_proc(ptr_t p GC_ATTR_UNUSED) {}
626 /* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
627 /* but it is explicitly tested for, and triggers different */
628 /* behavior. Objects registered in this way are not finalized */
629 /* if they are reachable by other finalizable objects, even if those */
630 /* other objects specify no ordering. */
631 STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
633 GC_normal_finalize_mark_proc(p);
636 /* Register a finalization function. See gc.h for details. */
637 /* The last parameter is a procedure that determines */
638 /* marking for finalization ordering. Any objects marked */
639 /* by that procedure are guaranteed not to have been */
640 /* finalized when this finalizer is invoked. */
641 STATIC void GC_register_finalizer_inner(void * obj,
642 GC_finalization_proc fn, void *cd,
643 GC_finalization_proc *ofn, void **ocd,
644 finalization_mark_proc mp)
646 struct finalizable_object * curr_fo;
648 struct finalizable_object *new_fo = 0;
649 hdr *hhdr = NULL; /* initialized to prevent warning. */
652 if (EXPECT(GC_find_leak, FALSE)) return;
654 if (log_fo_table_size == -1
655 || GC_fo_entries > ((word)1 << log_fo_table_size)) {
656 GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
659 if (log_fo_table_size < 0) ABORT("log_size is negative");
661 GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
662 1 << (unsigned)log_fo_table_size);
664 /* In the THREADS case we hold the allocation lock. */
666 struct finalizable_object *prev_fo = NULL;
669 index = HASH2(obj, log_fo_table_size);
670 curr_fo = GC_fnlz_roots.fo_head[index];
671 while (curr_fo != 0) {
672 GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
673 if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(obj)) {
674 /* Interruption by a signal in the middle of this */
675 /* should be safe. The client may see only *ocd */
676 /* updated, but we'll declare that to be his problem. */
677 if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
678 if (ofn) *ofn = curr_fo -> fo_fn;
679 /* Delete the structure for obj. */
681 GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
683 fo_set_next(prev_fo, fo_next(curr_fo));
687 /* May not happen if we get a signal. But a high */
688 /* estimate will only make the table larger than necessary. */
690 # if !defined(THREADS) && !defined(DBG_HDRS_ALL)
691 GC_free((void *)curr_fo);
694 curr_fo -> fo_fn = fn;
695 curr_fo -> fo_client_data = (ptr_t)cd;
696 curr_fo -> fo_mark_proc = mp;
697 /* Reinsert it. We deleted it first to maintain */
698 /* consistency in the event of a signal. */
700 GC_fnlz_roots.fo_head[index] = curr_fo;
702 fo_set_next(prev_fo, curr_fo);
706 # ifndef DBG_HDRS_ALL
707 if (EXPECT(new_fo != 0, FALSE)) {
708 /* Free unused new_fo returned by GC_oom_fn() */
709 GC_free((void *)new_fo);
715 curr_fo = fo_next(curr_fo);
717 if (EXPECT(new_fo != 0, FALSE)) {
718 /* new_fo is returned by GC_oom_fn(). */
721 if (NULL == hhdr) ABORT("Bad hhdr in GC_register_finalizer_inner");
732 if (EXPECT(0 == hhdr, FALSE)) {
733 /* We won't collect it, hence its finalizer would never run. */
739 new_fo = (struct finalizable_object *)
740 GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
741 if (EXPECT(new_fo != 0, TRUE))
745 new_fo = (struct finalizable_object *)
746 (*oom_fn)(sizeof(struct finalizable_object));
748 /* Not enough memory. *ocd and *ofn remain unchanged. */
751 /* It's not likely we'll make it here, but ... */
753 /* Recalculate index since the table may have grown, and */
754 /* check again that our finalizer is not in the table. */
756 GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
759 new_fo -> fo_hidden_base = GC_HIDE_POINTER(obj);
760 new_fo -> fo_fn = fn;
761 new_fo -> fo_client_data = (ptr_t)cd;
762 new_fo -> fo_object_size = hhdr -> hb_sz;
763 new_fo -> fo_mark_proc = mp;
764 fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
766 GC_fnlz_roots.fo_head[index] = new_fo;
770 GC_API void GC_CALL GC_register_finalizer(void * obj,
771 GC_finalization_proc fn, void * cd,
772 GC_finalization_proc *ofn, void ** ocd)
774 GC_register_finalizer_inner(obj, fn, cd, ofn,
775 ocd, GC_normal_finalize_mark_proc);
778 GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
779 GC_finalization_proc fn, void * cd,
780 GC_finalization_proc *ofn, void ** ocd)
782 GC_register_finalizer_inner(obj, fn, cd, ofn,
783 ocd, GC_ignore_self_finalize_mark_proc);
786 GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
787 GC_finalization_proc fn, void * cd,
788 GC_finalization_proc *ofn, void ** ocd)
790 GC_register_finalizer_inner(obj, fn, cd, ofn,
791 ocd, GC_null_finalize_mark_proc);
794 static GC_bool need_unreachable_finalization = FALSE;
795 /* Avoid the work if this isn't used. */
797 GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
798 GC_finalization_proc fn, void * cd,
799 GC_finalization_proc *ofn, void ** ocd)
801 need_unreachable_finalization = TRUE;
802 GC_ASSERT(GC_java_finalization);
803 GC_register_finalizer_inner(obj, fn, cd, ofn,
804 ocd, GC_unreachable_finalize_mark_proc);
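/*
 * Minimal registration sketch (assuming the public gc.h API; the
 * finalizer body and object are hypothetical):
 *
 *   static void my_finalizer(void *obj, void *client_data) {
 *     release_external_resource(obj, client_data);
 *   }
 *   ...
 *   void *obj = GC_MALLOC(64);
 *   GC_finalization_proc old_fn; void *old_cd;
 *   GC_register_finalizer(obj, my_finalizer, NULL, &old_fn, &old_cd);
 *
 * The variants above differ only in the mark procedure they pass:
 * GC_register_finalizer_no_order() drops the ordering guarantee,
 * GC_register_finalizer_ignore_self() ignores the object's pointers
 * to itself when computing that ordering, and
 * GC_register_finalizer_unreachable() (Java-style) defers
 * finalization until the object is unreachable even from other
 * finalizable objects.
 */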
808 STATIC void GC_dump_finalization_links(
809 const struct dl_hashtbl_s *dl_hashtbl)
811 size_t dl_size = dl_hashtbl->log_size == -1 ? 0 :
812 (size_t)1 << dl_hashtbl->log_size;
815 for (i = 0; i < dl_size; i++) {
816 struct disappearing_link *curr_dl;
818 for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
819 curr_dl = dl_next(curr_dl)) {
820 ptr_t real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
821 ptr_t real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
823 GC_printf("Object: %p, link: %p\n",
824 (void *)real_ptr, (void *)real_link);
829 GC_API void GC_CALL GC_dump_finalization(void)
831 struct finalizable_object * curr_fo;
832 size_t fo_size = log_fo_table_size == -1 ? 0 :
833 (size_t)1 << log_fo_table_size;
836 GC_printf("Disappearing (short) links:\n");
837 GC_dump_finalization_links(&GC_dl_hashtbl);
838 # ifndef GC_LONG_REFS_NOT_NEEDED
839 GC_printf("Disappearing long links:\n");
840 GC_dump_finalization_links(&GC_ll_hashtbl);
842 GC_printf("Finalizers:\n");
843 for (i = 0; i < fo_size; i++) {
844 for (curr_fo = GC_fnlz_roots.fo_head[i];
845 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
846 ptr_t real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
848 GC_printf("Finalizable object: %p\n", (void *)real_ptr);
852 #endif /* !NO_DEBUGGING */
855 STATIC word GC_old_dl_entries = 0; /* for stats printing */
856 # ifndef GC_LONG_REFS_NOT_NEEDED
857 STATIC word GC_old_ll_entries = 0;
859 #endif /* !SMALL_CONFIG */
862 /* Global variables to minimize the level of recursion when a client */
863 /* finalizer allocates memory. */
864 STATIC int GC_finalizer_nested = 0;
865 /* Only the lowest byte is used, the rest is */
866 /* padding for proper global data alignment */
867 /* required for some compilers (like Watcom). */
868 STATIC unsigned GC_finalizer_skipped = 0;
870 /* Checks and updates the level of finalizer recursion. */
871 /* Returns NULL if GC_invoke_finalizers() should not be called by the */
872 /* collector (to minimize the risk of deep finalizer recursion), */
873 /* otherwise returns a pointer to GC_finalizer_nested. */
874 STATIC unsigned char *GC_check_finalizer_nested(void)
876 unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;
878 /* We are inside another GC_invoke_finalizers(). */
879 /* Skip some implicitly-called GC_invoke_finalizers() */
880 /* depending on the nesting (recursion) level. */
881 if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
882 GC_finalizer_skipped = 0;
884 *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
885 return (unsigned char *)&GC_finalizer_nested;
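/*
 * Illustrative note: at nesting level n the test above lets only
 * every (1 << n)-th implicitly triggered GC_invoke_finalizers() call
 * proceed, so finalizers that themselves allocate drive deeper
 * nesting levels exponentially less often instead of recursing
 * without bound.
 */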
889 #define ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr_dl, prev_dl) \
892 size_t dl_size = dl_hashtbl->log_size == -1 ? 0 : \
893 (size_t)1 << dl_hashtbl->log_size; \
894 for (i = 0; i < dl_size; i++) { \
895 struct disappearing_link *prev_dl = NULL; \
896 curr_dl = dl_hashtbl -> head[i]; \
899 #define ITERATE_DL_HASHTBL_END(curr_dl, prev_dl) \
901 curr_dl = dl_next(curr_dl); \
906 #define DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr_dl, prev_dl, next_dl) \
908 next_dl = dl_next(curr_dl); \
909 if (NULL == prev_dl) { \
910 dl_hashtbl -> head[i] = next_dl; \
912 dl_set_next(prev_dl, next_dl); \
914 GC_clear_mark_bit(curr_dl); \
915 dl_hashtbl -> entries--; \
920 GC_INLINE void GC_make_disappearing_links_disappear(
921 struct dl_hashtbl_s* dl_hashtbl)
923 struct disappearing_link *curr, *next;
925 ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
926 ptr_t real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
927 ptr_t real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);
929 if (!GC_is_marked(real_ptr)) {
930 *(word *)real_link = 0;
931 GC_clear_mark_bit(curr);
932 DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
934 ITERATE_DL_HASHTBL_END(curr, prev)
937 GC_INLINE void GC_remove_dangling_disappearing_links(
938 struct dl_hashtbl_s* dl_hashtbl)
940 struct disappearing_link *curr, *next;
942 ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
943 ptr_t real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));
945 if (NULL != real_link && !GC_is_marked(real_link)) {
946 GC_clear_mark_bit(curr);
947 DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
949 ITERATE_DL_HASHTBL_END(curr, prev)
952 /* Called with the allocation lock held (but the world is running). */
953 /* Cause disappearing links to disappear and unreachable objects to be */
954 /* enqueued for finalization. */
955 GC_INNER void GC_finalize(void)
957 struct finalizable_object * curr_fo, * prev_fo, * next_fo;
960 size_t fo_size = log_fo_table_size == -1 ? 0 :
961 (size_t)1 << log_fo_table_size;
963 # ifndef SMALL_CONFIG
964 /* Save current GC_[dl/ll]_entries value for stats printing */
965 GC_old_dl_entries = GC_dl_hashtbl.entries;
966 # ifndef GC_LONG_REFS_NOT_NEEDED
967 GC_old_ll_entries = GC_ll_hashtbl.entries;
971 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
972 GC_mark_togglerefs();
974 GC_make_disappearing_links_disappear(&GC_dl_hashtbl);
976 /* Mark all objects reachable via chains of 1 or more pointers */
977 /* from finalizable objects. */
978 GC_ASSERT(GC_mark_state == MS_NONE);
979 for (i = 0; i < fo_size; i++) {
980 for (curr_fo = GC_fnlz_roots.fo_head[i];
981 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
982 GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
983 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
984 if (!GC_is_marked(real_ptr)) {
985 GC_MARKED_FOR_FINALIZATION(real_ptr);
986 GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
987 if (GC_is_marked(real_ptr)) {
988 WARN("Finalization cycle involving %p\n", real_ptr);
993 /* Enqueue for finalization all objects that are still unreachable. */
995 GC_bytes_finalized = 0;
996 for (i = 0; i < fo_size; i++) {
997 curr_fo = GC_fnlz_roots.fo_head[i];
999 while (curr_fo != 0) {
1000 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
1001 if (!GC_is_marked(real_ptr)) {
1002 if (!GC_java_finalization) {
1003 GC_set_mark_bit(real_ptr);
1005 /* Delete from hash table */
1006 next_fo = fo_next(curr_fo);
1007 if (NULL == prev_fo) {
1008 GC_fnlz_roots.fo_head[i] = next_fo;
1010 fo_set_next(prev_fo, next_fo);
1013 if (GC_object_finalized_proc)
1014 GC_object_finalized_proc(real_ptr);
1016 /* Add to list of objects awaiting finalization. */
1017 fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
1018 GC_fnlz_roots.finalize_now = curr_fo;
1019 /* Unhide the object pointer so any future collections will see it. */
1021 curr_fo -> fo_hidden_base =
1022 (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
1023 GC_bytes_finalized +=
1024 curr_fo -> fo_object_size
1025 + sizeof(struct finalizable_object);
1026 GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
1030 curr_fo = fo_next(curr_fo);
1035 if (GC_java_finalization) {
1036 /* make sure we mark everything reachable from objects finalized
1037 using the no_order mark_proc */
1038 for (curr_fo = GC_fnlz_roots.finalize_now;
1039 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
1040 real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
1041 if (!GC_is_marked(real_ptr)) {
1042 if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
1043 GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
1045 if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
1046 GC_set_mark_bit(real_ptr);
1051 /* now revive finalize-when-unreachable objects reachable from
1052 other finalizable objects */
1053 if (need_unreachable_finalization) {
1054 curr_fo = GC_fnlz_roots.finalize_now;
1055 # if defined(GC_ASSERTIONS) || defined(LINT2)
1056 if (curr_fo != NULL && log_fo_table_size < 0)
1057 ABORT("log_size is negative");
1060 while (curr_fo != NULL) {
1061 next_fo = fo_next(curr_fo);
1062 if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
1063 real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
1064 if (!GC_is_marked(real_ptr)) {
1065 GC_set_mark_bit(real_ptr);
1067 if (NULL == prev_fo) {
1068 GC_fnlz_roots.finalize_now = next_fo;
1070 fo_set_next(prev_fo, next_fo);
1072 curr_fo -> fo_hidden_base =
1073 GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
1074 GC_bytes_finalized -=
1075 curr_fo->fo_object_size + sizeof(struct finalizable_object);
1077 i = HASH2(real_ptr, log_fo_table_size);
1078 fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
1080 GC_fnlz_roots.fo_head[i] = curr_fo;
1090 GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
1091 # ifndef GC_TOGGLE_REFS_NOT_NEEDED
1092 GC_clear_togglerefs();
1094 # ifndef GC_LONG_REFS_NOT_NEEDED
1095 GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
1096 GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
1099 if (GC_fail_count) {
1100 /* Don't prevent running finalizers if there has been an allocation */
1101 /* failure recently. */
1103 GC_reset_finalizer_nested();
1105 GC_finalizer_nested = 0;
1110 #ifndef JAVA_FINALIZATION_NOT_NEEDED
1112 /* Enqueue all remaining finalizers to be run. Assumes the lock is held. */
1113 STATIC void GC_enqueue_all_finalizers(void)
1115 struct finalizable_object * curr_fo, * next_fo;
1120 fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
1121 GC_bytes_finalized = 0;
1122 for (i = 0; i < fo_size; i++) {
1123 curr_fo = GC_fnlz_roots.fo_head[i];
1124 GC_fnlz_roots.fo_head[i] = NULL;
1125 while (curr_fo != NULL) {
1126 real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
1127 GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
1128 GC_set_mark_bit(real_ptr);
1130 next_fo = fo_next(curr_fo);
1132 /* Add to list of objects awaiting finalization. */
1133 fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
1134 GC_fnlz_roots.finalize_now = curr_fo;
1136 /* Unhide the object pointer so any future collections will see it. */
1138 curr_fo -> fo_hidden_base =
1139 (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
1140 GC_bytes_finalized +=
1141 curr_fo -> fo_object_size + sizeof(struct finalizable_object);
1145 GC_fo_entries = 0; /* all entries deleted from the hash table */
1148 /* Invoke all remaining finalizers that haven't yet been run.
1149 * This is needed for strict compliance with the Java standard,
1150 * which can make the runtime guarantee that all finalizers are run.
1151 * Unfortunately, the Java standard implies we have to keep running
1152 * finalizers until there are no more left, a potential infinite loop.
1154 * Note that this is even more dangerous than the usual Java
1155 * finalizers, in that objects reachable from static variables
1156 * may have been finalized when these finalizers are run.
1157 * Finalizers run at this point must be prepared to deal with a
1158 * mostly broken world.
1159 * This routine is externally callable, so is called without
1160 * the allocation lock.
1162 GC_API void GC_CALL GC_finalize_all(void)
1167 while (GC_fo_entries > 0) {
1168 GC_enqueue_all_finalizers();
1170 GC_invoke_finalizers();
1171 /* Running the finalizers in this thread is arguably not a good */
1172 /* idea when we should be notifying another thread to run them. */
1173 /* But otherwise we don't have a great way to wait for them to run. */
1180 #endif /* !JAVA_FINALIZATION_NOT_NEEDED */
1182 /* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
1183 /* finalizers can only be called from some kind of "safe state" and */
1184 /* getting into that safe state is expensive.) */
1185 GC_API int GC_CALL GC_should_invoke_finalizers(void)
1187 return GC_fnlz_roots.finalize_now != NULL;
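/*
 * Minimal finalize-on-demand sketch (assuming the public gc.h API):
 * a client that can only run finalizers from a known-safe point may
 * disable implicit invocation and poll instead.
 *
 *   GC_set_finalize_on_demand(1);
 *   ...
 *   at a safe point in the main loop:
 *   if (GC_should_invoke_finalizers())
 *     (void)GC_invoke_finalizers();
 */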
1190 /* Invoke finalizers for all objects that are ready to be finalized. */
1191 /* Should be called without the allocation lock held. */
1192 GC_API int GC_CALL GC_invoke_finalizers(void)
1195 word bytes_freed_before = 0; /* initialized to prevent warning. */
1198 while (GC_fnlz_roots.finalize_now != NULL) {
1199 struct finalizable_object * curr_fo;
1205 bytes_freed_before = GC_bytes_freed;
1206 /* Don't do this outside, since we need the lock. */
1208 curr_fo = GC_fnlz_roots.finalize_now;
1210 if (curr_fo != 0) GC_fnlz_roots.finalize_now = fo_next(curr_fo);
1212 if (curr_fo == 0) break;
1214 GC_fnlz_roots.finalize_now = fo_next(curr_fo);
1216 fo_set_next(curr_fo, 0);
1217 (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
1218 curr_fo -> fo_client_data);
1219 curr_fo -> fo_client_data = 0;
1221 /* Explicit freeing of curr_fo is probably a bad idea. */
1222 /* It throws off accounting if nearly all objects are */
1223 /* finalizable. Otherwise it should not matter. */
1225 /* bytes_freed_before is initialized whenever count != 0 */
1226 if (count != 0 && bytes_freed_before != GC_bytes_freed) {
1228 GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
1234 static word last_finalizer_notification = 0;
1236 GC_INNER void GC_notify_or_invoke_finalizers(void)
1238 GC_finalizer_notifier_proc notifier_fn = 0;
1239 # if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
1240 static word last_back_trace_gc_no = 1; /* Skip first one. */
1244 # if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
1245 && !defined(MAKE_BACK_GRAPH)
1246 /* Quick check (while unlocked) for an empty finalization queue. */
1247 if (NULL == GC_fnlz_roots.finalize_now) return;
1251 /* This is a convenient place to generate backtraces if appropriate, */
1252 /* since that code is not callable with the allocation lock. */
1253 # if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
1254 if (GC_gc_no > last_back_trace_gc_no) {
1255 # ifdef KEEP_BACK_PTRS
1257 /* Stops when GC_gc_no wraps; that's OK. */
1258 last_back_trace_gc_no = (word)(-1); /* disable others. */
1259 for (i = 0; i < GC_backtraces; ++i) {
1260 /* FIXME: This tolerates concurrent heap mutation, */
1261 /* which may cause occasional mysterious results. */
1262 /* We need to release the GC lock, since GC_print_callers */
1263 /* acquires it. It probably shouldn't. */
1265 GC_generate_random_backtrace_no_gc();
1268 last_back_trace_gc_no = GC_gc_no;
1270 # ifdef MAKE_BACK_GRAPH
1271 if (GC_print_back_height) {
1273 GC_print_back_graph_stats();
1279 if (NULL == GC_fnlz_roots.finalize_now) {
1284 if (!GC_finalize_on_demand) {
1285 unsigned char *pnested = GC_check_finalizer_nested();
1287 /* Skip GC_invoke_finalizers() if nested */
1288 if (pnested != NULL) {
1289 (void) GC_invoke_finalizers();
1290 *pnested = 0; /* Reset since no more finalizers. */
1292 GC_ASSERT(NULL == GC_fnlz_roots.finalize_now);
1293 # endif /* Otherwise GC can run concurrently and add more */
1298 /* These variables require synchronization to avoid data races. */
1299 if (last_finalizer_notification != GC_gc_no) {
1300 last_finalizer_notification = GC_gc_no;
1301 notifier_fn = GC_finalizer_notifier;
1304 if (notifier_fn != 0)
1305 (*notifier_fn)(); /* Invoke the notifier */
1308 #ifndef SMALL_CONFIG
1309 # ifndef GC_LONG_REFS_NOT_NEEDED
1310 # define IF_LONG_REFS_PRESENT_ELSE(x,y) (x)
1312 # define IF_LONG_REFS_PRESENT_ELSE(x,y) (y)
1315 GC_INNER void GC_print_finalization_stats(void)
1317 struct finalizable_object *fo;
1318 unsigned long ready = 0;
1320 GC_log_printf("%lu finalization entries;"
1321 " %lu/%lu short/long disappearing links alive\n",
1322 (unsigned long)GC_fo_entries,
1323 (unsigned long)GC_dl_hashtbl.entries,
1324 (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
1325 GC_ll_hashtbl.entries, 0));
1327 for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
1329 GC_log_printf("%lu finalization-ready objects;"
1330 " %ld/%ld short/long links cleared\n",
1332 (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
1333 (long)IF_LONG_REFS_PRESENT_ELSE(
1334 GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
1336 #endif /* !SMALL_CONFIG */
1338 #endif /* !GC_NO_FINALIZATION */