+2009-06-09 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski)
+ diff90_cvs (resembling diff28, diff30, diff32, diff34, diff47,
+ diff49, diff60, diff62, diff66, diff67, diff68, diff72 partly)
+
+ * finalize.c (finalization_mark_proc): Replace K&R-style declaration
+ with ANSI C one.
+ * finalize.c (GC_grow_table, GC_register_finalizer_inner,
+ GC_enqueue_all_finalizers): Remove outdated comments about disabling
+ signals.
+ * finalize.c (GC_general_register_disappearing_link): Fix assertion
+ to catch NULL "obj" value.
+ * finalize.c (GC_unregister_disappearing_link): Check "link"
+ alignment before gaining the lock.
+ * finalize.c (GC_finalize): Refine comment.
+ * finalize.c (GC_finalize): Fix WARN() format specifier (should be
+ word-compliant, "%p" is used w/o "0x").
+ * finalize.c (GC_invoke_finalizers): Initialize "bytes_freed_before"
+ variable (to 0) to suppress compiler warning.
+ * include/gc_gcj.h (MARK_DESCR_OFFSET): Move to private/gc_pmark.h.
+ * include/gc_gcj.h: Add extern "C" header and tail.
+ * include/private/gc_pmark.h: Remove GC_do_parallel_mark(),
+ GC_help_wanted, GC_helper_count, GC_active_count declarations (move
+ the comments to the place where these symbols are defined in mark.c).
+ * mark.c: Add STATIC GC_do_parallel_mark() declaration (for use by
+ GC_mark_some_inner, if PARALLEL_MARK only).
+ * mark.c (GC_mark_some_inner, GC_help_wanted, GC_helper_count,
+ GC_active_count, GC_do_parallel_mark): Define as STATIC.
+ * pthread_support.c (GC_mark_thread): Ditto.
+ * typd_mlc.c (GC_explicit_typing_initialized, GC_explicit_kind,
+ GC_array_kind, GC_ext_descriptors, GC_ed_size, GC_avail_descr,
+ GC_typed_mark_proc_index, GC_array_mark_proc_index, GC_eobjfreelist,
+ GC_arobjfreelist): Ditto.
+ * include/private/gc_pmark.h (PUSH_CONTENTS_HDR): Change GC_ASSERT
+ for HBLKSIZE to GC_STATIC_ASSERT.
+ * mark.c (GC_noop): Define for Borland C the same as for Watcom.
+ * mark.c (GC_noop, GC_mark_and_push): Add ARGSUSED tag.
+ * pthread_support.c (GC_do_blocking_inner): Ditto.
+ * mark.c (GC_mark_from): Initialize "limit" (to 0) in the default
+ switch branch to suppress compiler warning.
+ * mark.c (GC_return_mark_stack): Append new-line to printf message.
+ * mark.c: Remove unused GC_true_func(), GC_PUSH_ALL().
+ * pthread_support.c (GC_mark_thread): Add dummy "return 0" to
+ suppress compiler warning.
+ * pthread_support.c (start_mark_threads): Move the code limiting
+ "GC_markers" value (and printing a warning) to GC_thr_init().
+ * pthread_support.c (GC_thr_init): Silently limit "GC_markers" value
+ if based on the number of CPUs.
+ * pthread_support.c (GC_thr_init): Treat incorrect "GC_markers"
+ values as one.
+ * pthread_support.c (GC_register_my_thread_inner): Add a check that
+ "stack_end" is non-NULL (the same as in win32_threads.c).
+ * pthread_support.c (pthread_create): Call GC_oom_fn before giving up
+ with ENOMEM.
+ * thread_local_alloc.c (return_single_freelist): Convert "for" loop
+ to "while" one to suppress "possible extraneous ';'" warning.
+
2009-06-08 Hans Boehm <Hans.Boehm@hp.com> (Really Ivan Maidanski
and Zoltan Varga)
* darwin_stop_world.c (GC_push_all_stacks): Recognize ARM32.
/* Type of mark procedure used for marking from finalizable object. */
/* This procedure normally does not mark the object, only its */
/* descendents. */
-typedef void finalization_mark_proc(/* ptr_t finalizable_obj_ptr */);
+typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
# define HASH3(addr,size,log_size) \
((((word)(addr) >> 3) ^ ((word)(addr) >> (3+(log_size)))) \
GC_finalization_proc fo_fn; /* Finalizer. */
ptr_t fo_client_data;
word fo_object_size; /* In bytes. */
- finalization_mark_proc * fo_mark_proc; /* Mark-through procedure */
+ finalization_mark_proc fo_mark_proc; /* Mark-through procedure */
} **fo_head = 0;
STATIC struct finalizable_object * GC_finalize_now = 0;
/* size. May be a no-op. */
/* *table is a pointer to an array of hash headers. If we succeed, we */
/* update both *table and *log_size_ptr. */
-/* Lock is held. Signals are disabled. */
+/* Lock is held. */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
signed_word *log_size_ptr)
{
# ifdef THREADS
LOCK();
# endif
- GC_ASSERT(GC_base(obj) == obj);
+ GC_ASSERT(obj != NULL && GC_base(obj) == obj);
if (log_dl_table_size == -1
|| GC_dl_entries > ((word)1 << log_dl_table_size)) {
GC_grow_table((struct hash_chain_entry ***)(&dl_head),
size_t index;
DCL_LOCK_STATE;
+ if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */
+
LOCK();
index = HASH2(link, log_dl_table_size);
- if (((word)link & (ALIGNMENT-1))) goto out;
prev_dl = 0; curr_dl = dl_head[index];
while (curr_dl != 0) {
if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
prev_dl = curr_dl;
curr_dl = dl_next(curr_dl);
}
-out:
UNLOCK();
return(0);
}
/* Register a finalization function. See gc.h for details. */
-/* in the nonthreads case, we try to avoid disabling signals, */
-/* since it can be expensive. Threads packages typically */
-/* make it cheaper. */
/* The last parameter is a procedure that determines */
/* marking for finalization ordering. Any objects marked */
/* by that procedure will be guaranteed to not have been */
(1 << (unsigned)log_fo_table_size));
}
}
- /* in the THREADS case signals are disabled and we hold allocation */
- /* lock; otherwise neither is true. Proceed carefully. */
+ /* in the THREADS case we hold allocation lock. */
base = (ptr_t)obj;
index = HASH2(base, log_fo_table_size);
prev_fo = 0; curr_fo = fo_head[index];
#endif
/* Called with held lock (but the world is running). */
-/* Cause disappearing links to disappear, and invoke finalizers. */
+/* Cause disappearing links to disappear and unreachable objects to be */
+/* enqueued for finalization. */
void GC_finalize(void)
{
struct disappearing_link * curr_dl, * prev_dl, * next_dl;
GC_MARKED_FOR_FINALIZATION(real_ptr);
GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
if (GC_is_marked(real_ptr)) {
- WARN("Finalization cycle involving %lx\n", real_ptr);
+ WARN("Finalization cycle involving %p\n", real_ptr);
}
}
}
#ifndef JAVA_FINALIZATION_NOT_NEEDED
-/* Enqueue all remaining finalizers to be run - Assumes lock is
- * held, and signals are disabled */
+/* Enqueue all remaining finalizers to be run - Assumes lock is held. */
STATIC void GC_enqueue_all_finalizers(void)
{
struct finalizable_object * curr_fo, * prev_fo, * next_fo;
{
struct finalizable_object * curr_fo;
int count = 0;
- word bytes_freed_before;
+ word bytes_freed_before = 0; /* initialized to prevent warning. */
DCL_LOCK_STATE;
while (GC_finalize_now != 0) {
#define GC_GCJ_H
-#ifndef MARK_DESCR_OFFSET
-# define MARK_DESCR_OFFSET sizeof(word)
-#endif
/* Gcj keeps GC descriptor as second word of vtable. This */
/* probably needs to be adjusted for other clients. */
/* We currently assume that this offset is such that: */
# include "gc.h"
#endif
+# ifdef __cplusplus
+ extern "C" {
+# endif
+
/* The following allocators signal an out of memory condition with */
/* return GC_oom_fn(bytes); */
GC_gcj_malloc_ignore_off_page(s,d)
# endif
+# ifdef __cplusplus
+ } /* end of extern "C" */
+# endif
+
#endif /* GC_GCJ_H */
extern mark_proc GC_mark_procs[MAX_MARK_PROCS];
*/
+#ifndef MARK_DESCR_OFFSET
+# define MARK_DESCR_OFFSET sizeof(word)
+#endif
+
/*
* Mark descriptor stuff that should remain private for now, mostly
* because it's hard to export WORDSZ without including gcconfig.h.
* of the University of Tokyo SGC in a less intrusive, though probably
* also less performant, way.
*/
- void GC_do_parallel_mark();
- /* initiate parallel marking. */
-
- extern GC_bool GC_help_wanted; /* Protected by mark lock */
- extern unsigned GC_helper_count; /* Number of running helpers. */
- /* Protected by mark lock */
- extern unsigned GC_active_count; /* Number of active helpers. */
- /* Protected by mark lock */
- /* May increase and decrease */
- /* within each mark cycle. But */
- /* once it returns to 0, it */
- /* stays zero for the cycle. */
- /* GC_mark_stack_top is also protected by mark lock. */
+
+ /* GC_mark_stack_top is protected by mark lock. */
+
/*
* GC_notify_all_marker() is used when GC_help_wanted is first set,
* when the last helper becomes inactive,
GC_ASSERT((ptr_t)(hhdr -> hb_block) < (ptr_t) current); \
} else { \
/* Accurate enough if HBLKSIZE <= 2**15. */ \
- GC_ASSERT(HBLKSIZE <= (1 << 15)); \
+ GC_STATIC_ASSERT(HBLKSIZE <= (1 << 15)); \
size_t obj_displ = (((low_prod >> 16) + 1) * (hhdr -> hb_sz)) >> 16; \
if (do_offset_check && !GC_valid_offsets[obj_displ]) { \
GC_ADD_TO_BLACK_LIST_NORMAL(current, source); \
/* We put this here to minimize the risk of inlining. */
/*VARARGS*/
-#ifdef __WATCOMC__
+#if defined(__BORLANDC__) || defined(__WATCOMC__)
+ /*ARGSUSED*/
void GC_noop(void *p, ...) {}
#else
# ifdef __DMC__
scan_ptr = 0;
}
+#ifdef PARALLEL_MARK
+ STATIC void GC_do_parallel_mark(void); /* initiate parallel marking. */
+#endif /* PARALLEL_MARK */
static void alloc_mark_stack(size_t);
/* exception handler, in case Windows unmaps one of our root */
/* segments. See below. In either case, we acquire the */
/* allocator lock long before we get here. */
- GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
+ STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
#else
GC_bool GC_mark_some(ptr_t cold_gc_frame)
#endif
continue;
}
goto retry;
+ default:
+ /* Can't happen. */
+ limit = 0; /* initialized to prevent warning. */
}
} else /* Small object with length descriptor */ {
mark_stack_top--;
#ifdef PARALLEL_MARK
-GC_bool GC_help_wanted = FALSE;
-unsigned GC_helper_count = 0;
-unsigned GC_active_count = 0;
+STATIC GC_bool GC_help_wanted = FALSE; /* Protected by mark lock */
+STATIC unsigned GC_helper_count = 0; /* Number of running helpers. */
+ /* Protected by mark lock */
+STATIC unsigned GC_active_count = 0; /* Number of active helpers. */
+ /* Protected by mark lock */
+ /* May increase and decrease */
+ /* within each mark cycle. But */
+ /* once it returns to 0, it */
+ /* stays zero for the cycle. */
+
word GC_mark_no = 0;
#define LOCAL_MARK_STACK_SIZE HBLKSIZE
my_start = my_top + 1;
if (my_start - GC_mark_stack + stack_size > GC_mark_stack_size) {
if (GC_print_stats) {
- GC_log_printf("No room to copy back mark stack.");
+ GC_log_printf("No room to copy back mark stack\n");
}
GC_mark_state = MS_INVALID;
GC_mark_stack_too_small = TRUE;
/* We hold the GC lock, not the mark lock. */
/* Currently runs until the mark stack is */
/* empty. */
-void GC_do_parallel_mark(void)
+STATIC void GC_do_parallel_mark(void)
{
mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
# ifndef SMALL_CONFIG
-#ifdef PARALLEL_MARK
- /* Break up root sections into page size chunks to better spread */
- /* out work. */
- STATIC GC_bool GC_true_func(struct hblk *h) { return TRUE; }
-# define GC_PUSH_ALL(b,t) GC_push_selected(b,t,GC_true_func,GC_push_all);
-#else
-# define GC_PUSH_ALL(b,t) GC_push_all(b,t);
-#endif
-
-
void GC_push_conditional(ptr_t bottom, ptr_t top, GC_bool all)
{
if (all) {
GC_PUSH_ONE_STACK((ptr_t)p, MARKED_FROM_REGISTER);
}
+/*ARGSUSED*/
struct GC_ms_entry *GC_mark_and_push(void *obj,
mse *mark_stack_ptr,
mse *mark_stack_limit,
static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
#endif
-void * GC_mark_thread(void * id)
+STATIC void * GC_mark_thread(void * id)
{
word my_mark_no = 0;
# ifdef IA64
marker_bsp[(word)id] = GC_save_regs_in_stack();
# endif
+
+ if ((word)id == (word)-1) return 0; /* to make compiler happy */
+
for (;; ++my_mark_no) {
/* GC_mark_no is passed only to allow GC_help_marker to terminate */
/* promptly. This is important if it were called from the signal */
unsigned i;
pthread_attr_t attr;
- if (GC_markers > MAX_MARKERS) {
- WARN("Limiting number of mark threads\n", 0);
- GC_markers = MAX_MARKERS;
- }
if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
char * markers_string = GETENV("GC_MARKERS");
if (markers_string != NULL) {
GC_markers = atoi(markers_string);
+ if (GC_markers > MAX_MARKERS) {
+ WARN("Limiting number of mark threads\n", 0);
+ GC_markers = MAX_MARKERS;
+ }
} else {
GC_markers = GC_nprocs;
+ if (GC_markers >= MAX_MARKERS)
+ GC_markers = MAX_MARKERS; /* silently limit GC_markers value */
}
}
# endif
GC_log_printf("Number of processors = %ld, "
"number of marker threads = %ld\n", GC_nprocs, GC_markers);
}
- if (GC_markers == 1) {
+ if (GC_markers <= 1) {
GC_parallel = FALSE;
if (GC_print_stats) {
GC_log_printf(
void *arg;
};
+/*ARGSUSED*/
static void GC_do_blocking_inner(ptr_t data, void * context) {
struct blocking_data * d = (struct blocking_data *) data;
GC_thread me;
me -> stop_info.stack_ptr = sb -> mem_base;
# endif
me -> stack_end = sb -> mem_base;
+ if (me -> stack_end == NULL)
+ ABORT("Bad stack base in GC_register_my_thread");
# ifdef IA64
me -> backing_store_end = sb -> reg_base;
# endif /* IA64 */
NORMAL);
UNLOCK();
if (!parallel_initialized) GC_init_parallel();
- if (0 == si) return(ENOMEM);
+ if (0 == si &&
+ (si = (struct start_info *)GC_oom_fn(sizeof(struct start_info))) == 0)
+ return(ENOMEM);
sem_init(&(si -> registered), 0, 0);
si -> start_routine = start_routine;
si -> arg = arg;
} else {
GC_ASSERT(GC_size(fl) == GC_size(*gfl));
/* Concatenate: */
- for (qptr = &(obj_link(fl)), q = *qptr;
- (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
+ qptr = &(obj_link(fl));
+ while ((word)(q = *qptr) >= HBLKSIZE)
+ qptr = &(obj_link(q));
GC_ASSERT(0 == q);
*qptr = *gfl;
*gfl = fl;
# define TYPD_EXTRA_BYTES (sizeof(word) - EXTRA_BYTES)
-GC_bool GC_explicit_typing_initialized = FALSE;
+STATIC GC_bool GC_explicit_typing_initialized = FALSE;
-int GC_explicit_kind; /* Object kind for objects with indirect */
+STATIC int GC_explicit_kind;
+ /* Object kind for objects with indirect */
/* (possibly extended) descriptors. */
-int GC_array_kind; /* Object kind for objects with complex */
+STATIC int GC_array_kind;
+ /* Object kind for objects with complex */
/* descriptors and GC_array_mark_proc. */
/* Extended descriptors. GC_typed_mark_proc understands these. */
} complex_descriptor;
#define TAG ld.ld_tag
-ext_descr * GC_ext_descriptors; /* Points to array of extended */
- /* descriptors. */
+STATIC ext_descr * GC_ext_descriptors; /* Points to array of extended */
+ /* descriptors. */
-size_t GC_ed_size = 0; /* Current size of above arrays. */
+STATIC size_t GC_ed_size = 0; /* Current size of above arrays. */
# define ED_INITIAL_SIZE 100;
-size_t GC_avail_descr = 0; /* Next available slot. */
+STATIC size_t GC_avail_descr = 0; /* Next available slot. */
-int GC_typed_mark_proc_index; /* Indices of my mark */
-int GC_array_mark_proc_index; /* procedures. */
+STATIC int GC_typed_mark_proc_index; /* Indices of my mark */
+STATIC int GC_array_mark_proc_index; /* procedures. */
static void GC_push_typed_structures_proc (void)
{
}
#endif
-ptr_t * GC_eobjfreelist;
+STATIC ptr_t * GC_eobjfreelist;
-ptr_t * GC_arobjfreelist;
+STATIC ptr_t * GC_arobjfreelist;
STATIC mse * GC_typed_mark_proc(word * addr, mse * mark_stack_ptr,
mse * mark_stack_limit, word env);