+2007-05-11 Hans Boehm <Hans.Boehm@hp.com>
+
+ * dbg_mlc.c, include/gc.h, finalize.c: Merge Alexandre Oliva's
+ GC_debug_register_finalizer_unreachable() patch from gcc tree.
+ * thread_local_alloc.c (GC_malloc, GC_malloc_atomic): Add assertions
+ to check GC has been initialized.
+
2007-05-10 Hans Boehm <Hans.Boehm@hp.com>
* include/gc_cpp.h: Documentation updates.
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright (c) 1997 by Silicon Graphics. All rights reserved.
* Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
+ * Copyright (C) 2007 Free Software Foundation, Inc.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
&my_old_cd);
}
store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
- }
+}
+
+void GC_debug_register_finalizer_unreachable
+ (void * obj, GC_finalization_proc fn,
+ void * cd, GC_finalization_proc *ofn,
+ void * *ocd)
+{
+ GC_finalization_proc my_old_fn;
+ void * my_old_cd;
+ ptr_t base = GC_base(obj);
+ if (0 == base) return;
+ if ((ptr_t)obj - base != sizeof(oh)) {
+ GC_err_printf(
+ "GC_debug_register_finalizer_unreachable called with "
+ "non-base-pointer %p\n",
+ obj);
+ }
+ if (0 == fn) {
+ GC_register_finalizer_unreachable(base, 0, 0, &my_old_fn, &my_old_cd);
+ } else {
+ GC_register_finalizer_unreachable(base, GC_debug_invoke_finalizer,
+ GC_make_closure(fn,cd), &my_old_fn,
+ &my_old_cd);
+ }
+ store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
+}
void GC_debug_register_finalizer_ignore_self
(void * obj, GC_finalization_proc fn,
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
* Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
+ * Copyright (C) 2007 Free Software Foundation, Inc.
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
{
}
+/* Possible finalization_marker procedures. Note that mark stack */
+/* overflow is handled by the caller, and is not a disaster. */
+
+/* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
+/* but it is explicitly tested for, and triggers different */
+/* behavior. Objects registered in this way are not finalized */
+/* if they are reachable by other finalizable objects, eve if those */
+/* other objects specify no ordering. */
+GC_API void GC_unreachable_finalize_mark_proc(p)
+ptr_t p;
+{
+ return GC_normal_finalize_mark_proc(p);
+}
+
/* Register a finalization function. See gc.h for details. */
ocd, GC_null_finalize_mark_proc);
}
+static GC_bool need_unreachable_finalization = FALSE;
+ /* Avoid the work if this isn't used. */
+
+void GC_register_finalizer_unreachable(void * obj,
+ GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void ** ocd)
+{
+ need_unreachable_finalization = TRUE;
+ GC_ASSERT(GC_java_finalization);
+ GC_register_finalizer_inner(obj, fn, cd, ofn,
+ ocd, GC_unreachable_finalize_mark_proc);
+}
+
#ifndef NO_DEBUGGING
void GC_dump_finalization(void)
{
if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
}
- GC_set_mark_bit(real_ptr);
+ if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
+ GC_set_mark_bit(real_ptr);
+ }
}
}
+
+ /* now revive finalize-when-unreachable objects reachable from
+ other finalizable objects */
+ if (need_unreachable_finalization) {
+ curr_fo = GC_finalize_now;
+ prev_fo = 0;
+ while (curr_fo != 0) {
+ next_fo = fo_next(curr_fo);
+ if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
+ real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
+ if (!GC_is_marked(real_ptr)) {
+ GC_set_mark_bit(real_ptr);
+ } else {
+ if (prev_fo == 0)
+ GC_finalize_now = next_fo;
+ else
+ fo_set_next(prev_fo, next_fo);
+
+ curr_fo -> fo_hidden_base =
+ (word) HIDE_POINTER(curr_fo -> fo_hidden_base);
+ GC_bytes_finalized -=
+ curr_fo -> fo_object_size + sizeof(struct finalizable_object);
+
+ i = HASH2(real_ptr, log_fo_table_size);
+ fo_set_next (curr_fo, fo_head[i]);
+ GC_fo_entries++;
+ fo_head[i] = curr_fo;
+ curr_fo = prev_fo;
+ }
+ }
+ prev_fo = curr_fo;
+ curr_fo = next_fo;
+ }
+ }
}
/* Remove dangling disappearing links. */
* Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
* Copyright 1996-1999 by Silicon Graphics. All rights reserved.
* Copyright 1999 by Hewlett-Packard Company. All rights reserved.
+ * Copyright (C) 2007 Free Software Foundation, Inc.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
/* it a bit safer to use non-topologically- */
/* ordered finalization. Default value is */
/* determined by JAVA_FINALIZATION macro. */
+ /* Enables register_finalizer_unreachable to */
+ /* work correctly. */
GC_API void (* GC_finalizer_notifier)(void);
/* Invoked by the collector when there are */
GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
GC_debug_register_finalizer_no_order(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
+ GC_debug_register_finalizer_unreachable(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS);
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
GC_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
GC_register_finalizer_no_order(p, f, d, of, od)
+# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
+ GC_register_finalizer_unreachable(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
(void * obj, GC_finalization_proc fn, void * cd,
GC_finalization_proc *ofn, void * *ocd);
+/* This is a special finalizer that is useful when an object's */
+/* finalizer must be run when the object is known to be no */
+/* longer reachable, not even from other finalizable objects. */
+/* It behaves like "normal" finalization, except that the */
+/* finalizer is not run while the object is reachable from */
+/* other objects specifying unordered finalization. */
+/* Effectively it allows an object referenced, possibly */
+/* indirectly, from an unordered finalizable object to override */
+/* the unordered finalization request. */
+/* This can be used in combination with finalizer_no_order so */
+/* as to release resources that must not be released while an */
+/* object can still be brought back to life by other */
+/* finalizers. */
+/* Only works if GC_java_finalization is set. Probably only */
+/* of interest when implementing a language that requires */
+/* unordered finalization (e.g. Java, C#). */
+GC_API void GC_register_finalizer_unreachable
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
+GC_API void GC_debug_register_finalizer_unreachable
+ (void * obj, GC_finalization_proc fn, void * cd,
+ GC_finalization_proc *ofn, void * *ocd);
/* The following routine may be used to break cycles between */
/* finalizable objects, thus causing cyclic finalizable */
}
tsd = GC_getspecific(k);
# else
+ GC_ASSERT(GC_is_initialized);
tsd = GC_getspecific(GC_thread_key);
# endif
# if defined(REDIRECT_MALLOC) && defined(USE_PTHREAD_SPECIFIC)
{
size_t granules = ROUNDED_UP_GRANULES(bytes);
void *result;
- void **tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
+ void **tiny_fl;
+
+ GC_ASSERT(GC_is_initialized);
+ tiny_fl = ((GC_tlfs)GC_getspecific(GC_thread_key))
-> ptrfree_freelists;
GC_FAST_MALLOC_GRANS(result, bytes, tiny_fl, DIRECT_GRANULES,
PTRFREE, GC_core_malloc_atomic(bytes), 0/* no init */);