2009-09-19 Ivan Maidanski <ivmai@mail.ru>
+ (ivmai130a.diff, ivmai130b.diff - superseding diff44, diff69)
+
+ * include/gc.h (GC_do_blocking, GC_call_with_gc_active): New
+ function prototype.
+ * include/private/gc_priv.h (STOP_WORLD): Replace a no-op (for the
+ single-threaded case) with an assertion check for the state to be
+ not a "do-blocking" one.
+ * include/private/gc_priv.h (blocking_data): Move the structure
+ definition from pthread_support.c; change "fn" return type to void
+ pointer.
+ * include/private/gc_priv.h (GC_activation_frame_s): New structure
+ type.
+ * include/private/gc_priv.h (GC_push_all_stack_frames): New
+ function declaration (only if THREADS).
+ * include/private/gc_priv.h (GC_world_stopped): Don't declare
+ unless THREADS.
+ * include/private/gc_priv.h (GC_blocked_sp,
+ GC_activation_frame_s): New declaration (only if not THREADS).
+ * include/private/gc_priv.h (GC_push_all_register_frames): New
+ function declaration (only for IA-64).
+ * include/private/gc_priv.h (NURSERY, GC_push_proc): Remove
+ obsolete (unused) symbols.
+ * include/private/gc_priv.h (GC_push_all_stack_partially_eager):
+ Remove declaration (since it is static now).
+ * mark_rts.c (GC_push_all_stack_partially_eager): Move from mark.c
+ (for code locality) and make STATIC.
+ * mark_rts.c (GC_push_all_register_frames): New function (only for
+ IA-64).
+ * mark_rts.c (GC_push_all_stack_frames): New function (only if
+ THREADS).
+ * mark_rts.c (GC_add_trace_entry): New function prototype (used by
+ GC_push_all_stack_partially_eager(), only if TRACE_BUF).
+ * mark_rts.c (GC_push_all_stack_part_eager_frames): New function.
+ * mark_rts.c (GC_save_regs_ret_val): Move the declaration out of a
+ function body (only for IA-64).
+ * mark_rts.c (GC_push_current_stack): Call
+ GC_push_all_stack_part_eager_frames() instead of
+ GC_push_all_stack_partially_eager().
+ * mark_rts.c (GC_push_current_stack): Call
+ GC_push_all_register_frames() instead of GC_push_all_eager() for
+ IA-64 backing store.
+ * misc.c (GC_do_blocking_inner): Declare function (if THREADS
+ only).
+ * misc.c (GC_blocked_sp, GC_blocked_register_sp,
+ GC_activation_frame): New global variables (only if not THREADS).
+ * misc.c (GC_call_with_gc_active, GC_do_blocking_inner): New API
+ function (only if not THREADS).
+ * misc.c (GC_do_blocking): Move the function from
+ pthread_support.c.
+ * include/private/pthread_support.h (GC_Thread_Rep): Add
+ "activation_frame" field.
+ * pthread_stop_world.c (GC_push_all_stacks): Call
+ GC_push_all_stack_frames() and GC_push_all_register_frames() instead
+ of GC_push_all_stack() and/or GC_push_all_eager(); don't check for
+ STACK_GROWS_UP here.
+ * pthread_support.c (GC_do_blocking_inner): Remove "static"; store
+ "fn" result back to "client_data" field.
+ * pthread_support.c (GC_call_with_gc_active): New API function.
+ * win32_threads.c (GC_call_with_gc_active): Ditto.
+ * win32_threads.c (GC_Thread_Rep): Add "thread_blocked_sp" and
+ "activation_frame" fields.
+ * win32_threads.c (GC_new_thread): Add assertion checking for
+ thread_blocked_sp is NULL.
+ * win32_threads.c (GC_do_blocking_inner): New function.
+ * win32_threads.c (GC_stop_world): Don't suspend a thread if its
+ thread_blocked_sp is non-NULL.
+ * win32_threads.c (GC_push_stack_for): Use thread
+ "activation_frame" (if non-NULL); use "thread_blocked_sp" if
+ non-NULL (instead of calling GetThreadContext()); "UNPROTECT" the
+ thread before modifying its last_stack_min; call
+ GC_push_all_stack_frames() instead of GC_push_all_stack(); update
+ the comments.
+
+2009-09-19 Ivan Maidanski <ivmai@mail.ru>
(ivmai129.diff - superseding diff47)
* alloc.c (GC_default_stop_func): New static variable (initialized
/* a GC callback function (except for GC_call_with_stack_base() one). */
GC_API int GC_CALL GC_unregister_my_thread(void);
+/* Wrapper for functions that are likely to block (or, at least, do not */
+/* allocate garbage collected memory and/or manipulate pointers to the */
+/* garbage collected heap) for an appreciable length of time. While fn */
+/* is running, the collector is said to be in the "inactive" state for */
+/* the current thread (this means that the thread is not suspended and */
+/* the thread's stack frames "belonging" to the functions in the */
+/* "inactive" state are not scanned during garbage collections). It is */
+/* allowed for fn to call GC_call_with_gc_active() (even recursively), */
+/* thus temporarily toggling the collector's state back to "active". */
+GC_API void * GC_CALL GC_do_blocking(GC_fn_type /* fn */,
+ void * /* client_data */);
+
+/* Call a function switching to the "active" state of the collector for */
+/* the current thread (i.e. the user function is allowed to call any */
+/* GC function and/or manipulate pointers to the garbage collected */
+/* heap). GC_call_with_gc_active() has the functionality opposite to */
+/* GC_do_blocking().  It is assumed that the collector is already      */
+/* initialized and the current thread is registered. fn may toggle */
+/* the collector thread's state temporarily to "inactive" one by using */
+/* GC_do_blocking. GC_call_with_gc_active() often can be used to */
+/* provide a sufficiently accurate stack base. */
+GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type /* fn */,
+ void * /* client_data */);
+
/* Attempt to fill in the GC_stack_base structure with the stack base */
/* for this thread. This appears to be required to implement anything */
/* like the JNI AttachCurrentThread in an environment in which new */
# define STOP_WORLD() GC_stop_world()
# define START_WORLD() GC_start_world()
# else
-# define STOP_WORLD()
+ /* Just do a sanity check: we are not inside GC_do_blocking(). */
+# define STOP_WORLD() GC_ASSERT(GC_blocked_sp == NULL)
# define START_WORLD()
# endif
# endif
extern long GC_large_alloc_warn_suppressed;
/* Number of warnings suppressed so far. */
+/* This is used by GC_do_blocking[_inner](). */
+struct blocking_data {
+ GC_fn_type fn;
+ void * client_data; /* and result */
+};
+
+/* This is used by GC_call_with_gc_active(), GC_push_all_stack_frames(). */
+struct GC_activation_frame_s {
+ ptr_t saved_stack_ptr;
+#ifdef IA64
+ ptr_t saved_backing_store_ptr;
+ ptr_t backing_store_end;
+#endif
+ struct GC_activation_frame_s *prev;
+};
+
#ifdef THREADS
+/* Process all activation "frames" - scan entire stack except for */
+/* frames belonging to the user functions invoked by GC_do_blocking(). */
+void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
+ struct GC_activation_frame_s *activation_frame);
+
extern GC_bool GC_world_stopped;
+#else
+ extern ptr_t GC_blocked_sp;
+ extern struct GC_activation_frame_s *GC_activation_frame;
+ /* Points to the "frame" data held in stack by */
+ /* the innermost GC_call_with_gc_active(). */
+ /* NULL if no such "frame" active. */
+#endif /* !THREADS */
+
+#ifdef IA64
  /* Similar to GC_push_all_stack_frames() but for IA-64 register backing store. */
+ void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi, int eager,
+ struct GC_activation_frame_s *activation_frame);
#endif
/* Marks are in a reserved area in */
/* ensures that stack is scanned */
/* immediately, not just scheduled */
/* for scanning. */
-#ifndef THREADS
- void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
- ptr_t cold_gc_frame);
- /* Similar to GC_push_all_eager, but only the */
- /* part hotter than cold_gc_frame is scanned */
- /* immediately. Needed to ensure that callee- */
- /* save registers are not missed. */
-#else
+
/* In the threads case, we push part of the current thread stack */
/* with GC_push_all_eager when we push the registers. This gets the */
/* callee-save registers that may disappear. The remainder of the */
/* stacks are scheduled for scanning in *GC_push_other_roots, which */
/* is thread-package-specific. */
-#endif
+
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame);
/* Push all or dirty roots. */
extern void (*GC_push_other_roots)(void);
ptr_t GC_save_regs_in_stack(void);
# endif
/* Push register contents onto mark stack. */
- /* If NURSERY is defined, the default push */
- /* action can be overridden with GC_push_proc */
-# ifdef NURSERY
- extern void (*GC_push_proc)(ptr_t);
-# endif
# if defined(MSWIN32) || defined(MSWINCE)
void __cdecl GC_push_one(word p);
# else
ptr_t stack_end; /* Cold end of the stack (except for */
/* main thread). */
# ifdef IA64
- ptr_t backing_store_end;
- ptr_t backing_store_ptr;
+ ptr_t backing_store_end;
+ ptr_t backing_store_ptr;
# endif
- void * status; /* The value returned from the thread. */
- /* Used only to avoid premature */
- /* reclamation of any data it might */
+
+ struct GC_activation_frame_s *activation_frame;
+ /* Points to the "frame" data held in stack by */
+ /* the innermost GC_call_with_gc_active() of */
+ /* this thread. May be NULL. */
+
+ void * status; /* The value returned from the thread. */
+ /* Used only to avoid premature */
+ /* reclamation of any data it might */
/* reference. */
/* This is unfortunately also the */
/* reason we need to intercept join */
# undef GC_least_plausible_heap_addr
}
-#ifndef THREADS
-/*
- * A version of GC_push_all that treats all interior pointers as valid
- * and scans part of the area immediately, to make sure that saved
- * register values are not lost.
- * Cold_gc_frame delimits the stack section that must be scanned
- * eagerly. A zero value indicates that no eager scanning is needed.
- * We don't need to worry about the MANUAL_VDB case here, since this
- * is only called in the single-threaded case. We assume that we
- * cannot collect between an assignment and the corresponding
- * GC_dirty() call.
- */
-void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
- ptr_t cold_gc_frame)
-{
- if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
- /* Push the hot end of the stack eagerly, so that register values */
- /* saved inside GC frames are marked before they disappear. */
- /* The rest of the marking can be deferred until later. */
- if (0 == cold_gc_frame) {
- GC_push_all_stack(bottom, top);
- return;
- }
- GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
-# ifdef STACK_GROWS_DOWN
- GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
- GC_push_all_eager(bottom, cold_gc_frame);
-# else /* STACK_GROWS_UP */
- GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
- GC_push_all_eager(cold_gc_frame, top);
-# endif /* STACK_GROWS_UP */
- } else {
- GC_push_all_eager(bottom, top);
- }
-# ifdef TRACE_BUF
- GC_add_trace_entry("GC_push_all_stack", bottom, top);
-# endif
-}
-#endif /* !THREADS */
-
void GC_push_all_stack(ptr_t bottom, ptr_t top)
{
# if defined(THREADS) && defined(MPROTECT_VDB)
}
}
+#ifdef IA64
+/* Similar to GC_push_all_stack_frames() but for IA-64 register backing store. */
+void GC_push_all_register_frames(ptr_t bs_lo, ptr_t bs_hi, int eager,
+ struct GC_activation_frame_s *activation_frame)
+{
+ while (activation_frame != NULL) {
+ ptr_t frame_bs_lo = activation_frame -> backing_store_end;
+ GC_ASSERT(frame_bs_lo <= bs_hi);
+ if (eager) {
+ GC_push_all_eager(frame_bs_lo, bs_hi);
+ } else {
+ GC_push_all_stack(frame_bs_lo, bs_hi);
+ }
+ bs_hi = activation_frame -> saved_backing_store_ptr;
+ activation_frame = activation_frame -> prev;
+ }
+ GC_ASSERT(bs_lo <= bs_hi);
+ if (eager) {
+ GC_push_all_eager(bs_lo, bs_hi);
+ } else {
+ GC_push_all_stack(bs_lo, bs_hi);
+ }
+}
+#endif /* IA64 */
+
+#ifdef THREADS
+
+void GC_push_all_stack_frames(ptr_t lo, ptr_t hi,
+ struct GC_activation_frame_s *activation_frame)
+{
+ while (activation_frame != NULL) {
+ GC_ASSERT(lo HOTTER_THAN (ptr_t)activation_frame);
+# ifdef STACK_GROWS_UP
+ GC_push_all_stack((ptr_t)activation_frame, lo);
+# else /* STACK_GROWS_DOWN */
+ GC_push_all_stack(lo, (ptr_t)activation_frame);
+# endif
+ lo = activation_frame -> saved_stack_ptr;
+ GC_ASSERT(lo != NULL);
+ activation_frame = activation_frame -> prev;
+ }
+ GC_ASSERT(!(hi HOTTER_THAN lo));
+# ifdef STACK_GROWS_UP
+ /* We got them backwards! */
+ GC_push_all_stack(hi, lo);
+# else /* STACK_GROWS_DOWN */
+ GC_push_all_stack(lo, hi);
+# endif
+}
+
+#else /* !THREADS */
+
+# ifdef TRACE_BUF
+ /* Defined in mark.c. */
+ void GC_add_trace_entry(char *kind, word arg1, word arg2);
+# endif
+
+ /* Similar to GC_push_all_eager, but only the */
+ /* part hotter than cold_gc_frame is scanned */
+ /* immediately. Needed to ensure that callee- */
+ /* save registers are not missed. */
+/*
+ * A version of GC_push_all that treats all interior pointers as valid
+ * and scans part of the area immediately, to make sure that saved
+ * register values are not lost.
+ * Cold_gc_frame delimits the stack section that must be scanned
+ * eagerly. A zero value indicates that no eager scanning is needed.
+ * We don't need to worry about the MANUAL_VDB case here, since this
+ * is only called in the single-threaded case. We assume that we
+ * cannot collect between an assignment and the corresponding
+ * GC_dirty() call.
+ */
+STATIC void GC_push_all_stack_partially_eager(ptr_t bottom, ptr_t top,
+ ptr_t cold_gc_frame)
+{
+ if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
+ /* Push the hot end of the stack eagerly, so that register values */
+ /* saved inside GC frames are marked before they disappear. */
+ /* The rest of the marking can be deferred until later. */
+ if (0 == cold_gc_frame) {
+ GC_push_all_stack(bottom, top);
+ return;
+ }
+ GC_ASSERT(bottom <= cold_gc_frame && cold_gc_frame <= top);
+# ifdef STACK_GROWS_DOWN
+ GC_push_all(cold_gc_frame - sizeof(ptr_t), top);
+ GC_push_all_eager(bottom, cold_gc_frame);
+# else /* STACK_GROWS_UP */
+ GC_push_all(bottom, cold_gc_frame + sizeof(ptr_t));
+ GC_push_all_eager(cold_gc_frame, top);
+# endif /* STACK_GROWS_UP */
+ } else {
+ GC_push_all_eager(bottom, top);
+ }
+# ifdef TRACE_BUF
+ GC_add_trace_entry("GC_push_all_stack", bottom, top);
+# endif
+}
+
+/* Similar to GC_push_all_stack_frames() but also uses cold_gc_frame. */
+STATIC void GC_push_all_stack_part_eager_frames(ptr_t lo, ptr_t hi,
+ ptr_t cold_gc_frame, struct GC_activation_frame_s *activation_frame)
+{
+ GC_ASSERT(activation_frame == NULL || cold_gc_frame == NULL ||
+ cold_gc_frame HOTTER_THAN (ptr_t)activation_frame);
+
+ while (activation_frame != NULL) {
+ GC_ASSERT(lo HOTTER_THAN (ptr_t)activation_frame);
+# ifdef STACK_GROWS_UP
+ GC_push_all_stack_partially_eager((ptr_t)activation_frame, lo,
+ cold_gc_frame);
+# else /* STACK_GROWS_DOWN */
+ GC_push_all_stack_partially_eager(lo, (ptr_t)activation_frame,
+ cold_gc_frame);
+# endif
+ lo = activation_frame -> saved_stack_ptr;
+ GC_ASSERT(lo != NULL);
+ activation_frame = activation_frame -> prev;
+ cold_gc_frame = NULL; /* Use at most once. */
+ }
+
+ GC_ASSERT(!(hi HOTTER_THAN lo));
+# ifdef STACK_GROWS_UP
+ /* We got them backwards! */
+ GC_push_all_stack_partially_eager(hi, lo, cold_gc_frame);
+# else /* STACK_GROWS_DOWN */
+ GC_push_all_stack_partially_eager(lo, hi, cold_gc_frame);
+# endif
+}
+
+# ifdef IA64
+ extern word GC_save_regs_ret_val;
+ /* Previously set to backing store pointer. */
+# endif
+
+#endif /* !THREADS */
+
/* Push enough of the current stack eagerly to */
/* ensure that callee-save registers saved in */
/* GC frames are scanned. */
GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
# endif
# else
-# ifdef STACK_GROWS_DOWN
- GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
- cold_gc_frame );
-# ifdef IA64
+ GC_push_all_stack_part_eager_frames(GC_approx_sp(), GC_stackbottom,
+ cold_gc_frame, GC_activation_frame);
+# ifdef IA64
/* We also need to push the register stack backing store. */
/* This should really be done in the same way as the */
/* regular stack. For now we fudge it a bit. */
/* Note that the backing store grows up, so we can't use */
/* GC_push_all_stack_partially_eager. */
{
- extern word GC_save_regs_ret_val;
- /* Previously set to backing store pointer. */
ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
- ptr_t cold_gc_bs_pointer;
- if (GC_all_interior_pointers) {
- cold_gc_bs_pointer = bsp - 2048;
- if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
- cold_gc_bs_pointer = BACKING_STORE_BASE;
- } else {
- GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
- }
+ ptr_t cold_gc_bs_pointer = bsp - 2048;
+ if (GC_all_interior_pointers &&
+ cold_gc_bs_pointer > BACKING_STORE_BASE) {
+ /* Adjust cold_gc_bs_pointer if below our innermost */
+ /* "activation frame" in backing store. */
+ if (GC_activation_frame != NULL && cold_gc_bs_pointer <
+ GC_activation_frame->backing_store_end)
+ cold_gc_bs_pointer = GC_activation_frame->backing_store_end;
+ GC_push_all_register_frames(BACKING_STORE_BASE,
+ cold_gc_bs_pointer, FALSE, GC_activation_frame);
+ GC_push_all_eager(cold_gc_bs_pointer, bsp);
} else {
- cold_gc_bs_pointer = BACKING_STORE_BASE;
+ GC_push_all_register_frames(BACKING_STORE_BASE, bsp,
+ TRUE /* eager */, GC_activation_frame);
}
- GC_push_all_eager(cold_gc_bs_pointer, bsp);
/* All values should be sufficiently aligned that we */
/* dont have to worry about the boundary. */
}
-# endif
-# else
- GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
- cold_gc_frame );
# endif
# endif /* !THREADS */
}
return fn(&base, arg);
}
+#ifdef THREADS
+
+/* Defined in pthread_support.c or win32_threads.c. */
+void GC_do_blocking_inner(ptr_t data, void * context);
+
+#else
+
+ptr_t GC_blocked_sp = NULL;
+ /* NULL value means we are not inside GC_do_blocking() call. */
+# ifdef IA64
+ STATIC ptr_t GC_blocked_register_sp = NULL;
+# endif
+
+struct GC_activation_frame_s *GC_activation_frame = NULL;
+
+/* This is nearly the same as in win32_threads.c */
+GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
+ void * client_data) {
+ struct GC_activation_frame_s frame;
+ GC_ASSERT(GC_is_initialized);
+
+ /* Adjust our stack base value (this could happen if */
+ /* GC_get_main_stack_base() is unimplemented or broken for */
+ /* the platform). */
+ if (GC_stackbottom HOTTER_THAN (ptr_t)(&frame))
+ GC_stackbottom = (ptr_t)(&frame);
+
+ if (GC_blocked_sp == NULL) {
+ /* We are not inside GC_do_blocking() - do nothing more. */
+ return fn(client_data);
+ }
+
+ /* Setup new "frame". */
+ frame.saved_stack_ptr = GC_blocked_sp;
+# ifdef IA64
+ /* This is the same as in GC_call_with_stack_base(). */
+ frame.backing_store_end = GC_save_regs_in_stack();
+ /* Unnecessarily flushes register stack, */
+ /* but that probably doesn't hurt. */
+ frame.saved_backing_store_ptr = GC_blocked_register_sp;
+# endif
+ frame.prev = GC_activation_frame;
+ GC_blocked_sp = NULL;
+ GC_activation_frame = &frame;
+
+ client_data = fn(client_data);
+ GC_ASSERT(GC_blocked_sp == NULL);
+ GC_ASSERT(GC_activation_frame == &frame);
+
+ /* Restore original "frame". */
+ GC_activation_frame = frame.prev;
+# ifdef IA64
+ GC_blocked_register_sp = frame.saved_backing_store_ptr;
+# endif
+ GC_blocked_sp = frame.saved_stack_ptr;
+
+ return client_data; /* result */
+}
+
+/* This is nearly the same as in win32_threads.c */
+/*ARGSUSED*/
+STATIC void GC_do_blocking_inner(ptr_t data, void * context) {
+ struct blocking_data * d = (struct blocking_data *) data;
+ GC_ASSERT(GC_is_initialized);
+ GC_ASSERT(GC_blocked_sp == NULL);
+# ifdef SPARC
+ GC_blocked_sp = GC_save_regs_in_stack();
+# else
+ GC_blocked_sp = (ptr_t) &d; /* save approx. sp */
+# endif
+# ifdef IA64
+ GC_blocked_register_sp = GC_save_regs_in_stack();
+# endif
+
+ d -> client_data = (d -> fn)(d -> client_data);
+
+# ifdef SPARC
+ GC_ASSERT(GC_blocked_sp != NULL);
+# else
+ GC_ASSERT(GC_blocked_sp == (ptr_t) &d);
+# endif
+ GC_blocked_sp = NULL;
+}
+
+#endif /* !THREADS */
+
+/* Wrapper for functions that are likely to block (or, at least, do not */
+/* allocate garbage collected memory and/or manipulate pointers to the */
+/* garbage collected heap) for an appreciable length of time. */
+/* In the single threaded case, GC_do_blocking() (together */
+/* with GC_call_with_gc_active()) might be used to make stack scanning */
+/* more precise (i.e. scan only stack frames of functions that allocate */
+/* garbage collected memory and/or manipulate pointers to the garbage */
+/* collected heap). */
+GC_API void * GC_CALL GC_do_blocking(GC_fn_type fn, void * client_data) {
+ struct blocking_data my_data;
+
+ my_data.fn = fn;
+ my_data.client_data = client_data;
+ GC_with_callee_saves_pushed(GC_do_blocking_inner, (ptr_t)(&my_data));
+ return my_data.client_data; /* result */
+}
+
#if !defined(NO_DEBUGGING)
GC_API void GC_CALL GC_dump(void)
(unsigned)(p -> id), lo, hi);
# endif
if (0 == lo) ABORT("GC_push_all_stacks: sp not set!\n");
-# ifdef STACK_GROWS_UP
- /* We got them backwards! */
- GC_push_all_stack(hi, lo);
-# else
- GC_push_all_stack(lo, hi);
-# endif
+ GC_push_all_stack_frames(lo, hi, p -> activation_frame);
# ifdef IA64
# if DEBUG_THREADS
GC_printf("Reg stack for thread 0x%x = [%p,%p)\n",
(unsigned)p -> id, bs_lo, bs_hi);
# endif
- if (THREAD_EQUAL(p -> id, me)) {
- /* FIXME: This may add an unbounded number of entries, */
- /* and hence overflow the mark stack, which is bad. */
- GC_push_all_eager(bs_lo, bs_hi);
- } else {
- GC_push_all_stack(bs_lo, bs_hi);
- }
+ /* FIXME: This (if p->id==me) may add an unbounded number of */
+ /* entries, and hence overflow the mark stack, which is bad. */
+ GC_push_all_register_frames(bs_lo, bs_hi,
+ THREAD_EQUAL(p -> id, me), p -> activation_frame);
# endif
}
}
/* Wrapper for functions that are likely to block for an appreciable */
/* length of time. */
-struct blocking_data {
- void (GC_CALLBACK *fn)(void *);
- void *arg;
-};
-
/*ARGSUSED*/
-static void GC_do_blocking_inner(ptr_t data, void * context) {
+void GC_do_blocking_inner(ptr_t data, void * context) {
struct blocking_data * d = (struct blocking_data *) data;
GC_thread me;
LOCK();
me -> thread_blocked = TRUE;
/* Save context here if we want to support precise stack marking */
UNLOCK();
- (d -> fn)(d -> arg);
+ d -> client_data = (d -> fn)(d -> client_data);
LOCK(); /* This will block if the world is stopped. */
me -> thread_blocked = FALSE;
UNLOCK();
}
-void GC_CALL GC_do_blocking(void (GC_CALLBACK *fn)(void *), void *arg) {
- struct blocking_data my_data;
+/* GC_call_with_gc_active() has the opposite to GC_do_blocking() */
+/* functionality. It might be called from a user function invoked by */
+/* GC_do_blocking() to temporarily re-enable calling any GC function  */
+/* and/or manipulating pointers to the garbage collected heap. */
+GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
+ void * client_data) {
+ struct GC_activation_frame_s frame;
+ GC_thread me;
+ LOCK(); /* This will block if the world is stopped. */
+ me = GC_lookup_thread(pthread_self());
- my_data.fn = fn;
- my_data.arg = arg;
- GC_with_callee_saves_pushed(GC_do_blocking_inner, (ptr_t)(&my_data));
-}
+ /* Adjust our stack base value (this could happen unless */
+ /* GC_get_stack_base() was used which returned GC_SUCCESS). */
+ if ((me -> flags & MAIN_THREAD) == 0) {
+ GC_ASSERT(me -> stack_end != NULL);
+ if (me -> stack_end HOTTER_THAN (ptr_t)(&frame))
+ me -> stack_end = (ptr_t)(&frame);
+ } else {
+ /* The original stack. */
+ if (GC_stackbottom HOTTER_THAN (ptr_t)(&frame))
+ GC_stackbottom = (ptr_t)(&frame);
+ }
+
+ if (me -> thread_blocked == FALSE) {
+ /* We are not inside GC_do_blocking() - do nothing more. */
+ UNLOCK();
+ return fn(client_data);
+ }
+
+ /* Setup new "frame". */
+# ifdef GC_DARWIN_THREADS
+ /* FIXME: Implement it for Darwin ("frames" are ignored at present). */
+# else
+ frame.saved_stack_ptr = me -> stop_info.stack_ptr;
+# endif
+# ifdef IA64
+ /* This is the same as in GC_call_with_stack_base(). */
+ frame.backing_store_end = GC_save_regs_in_stack();
+ /* Unnecessarily flushes register stack, */
+ /* but that probably doesn't hurt. */
+ frame.saved_backing_store_ptr = me -> backing_store_ptr;
+# endif
+ frame.prev = me -> activation_frame;
+ me -> thread_blocked = FALSE;
+ me -> activation_frame = &frame;
+ UNLOCK();
+ client_data = fn(client_data);
+ GC_ASSERT(me -> thread_blocked == FALSE);
+ GC_ASSERT(me -> activation_frame == &frame);
+
+ /* Restore original "frame". */
+ LOCK();
+ me -> activation_frame = frame.prev;
+# ifdef IA64
+ me -> backing_store_ptr = frame.saved_backing_store_ptr;
+# endif
+ me -> thread_blocked = TRUE;
+# ifndef GC_DARWIN_THREADS
+ me -> stop_info.stack_ptr = frame.saved_stack_ptr;
+# endif
+ UNLOCK();
+
+ return client_data; /* result */
+}
+
struct start_info {
void *(*start_routine)(void *);
void *arg;
ptr_t backing_store_ptr;
# endif
+ ptr_t thread_blocked_sp; /* Protected by GC lock. */
+ /* NULL value means thread unblocked. */
+ /* If set to non-NULL, thread will */
+ /* acquire GC lock before doing any */
+ /* pointer manipulations. Thus it does */
+ /* not need to stop this thread. */
+
+ struct GC_activation_frame_s *activation_frame;
+ /* Points to the "frame" data held in stack by */
+ /* the innermost GC_call_with_gc_active() of */
+ /* this thread. May be NULL. */
+
unsigned finalizer_nested;
unsigned finalizer_skipped; /* Used by GC_check_finalizer_nested() */
/* to minimize the level of recursion */
result -> next = GC_threads[hv];
GC_threads[hv] = result;
# ifdef GC_PTHREADS
- GC_ASSERT(result -> flags == 0 /* && result -> thread_blocked == 0 */);
+ GC_ASSERT(result -> flags == 0);
# endif
+ GC_ASSERT(result -> thread_blocked_sp == NULL);
return(result);
}
return GC_SUCCESS;
}
+/* Wrapper for functions that are likely to block for an appreciable */
+/* length of time. */
+
+/* GC_do_blocking_inner() is nearly the same as in pthread_support.c */
+/*ARGSUSED*/
+void GC_do_blocking_inner(ptr_t data, void * context) {
+ struct blocking_data * d = (struct blocking_data *) data;
+ DWORD t = GetCurrentThreadId();
+ GC_thread me;
+ LOCK();
+ me = GC_lookup_thread_inner(t);
+ GC_ASSERT(me -> thread_blocked_sp == NULL);
+# ifdef IA64
+ me -> backing_store_ptr = GC_save_regs_in_stack();
+# endif
+ me -> thread_blocked_sp = (ptr_t) &d; /* save approx. sp */
+ /* Save context here if we want to support precise stack marking */
+ UNLOCK();
+ d -> client_data = (d -> fn)(d -> client_data);
+ LOCK(); /* This will block if the world is stopped. */
+ me -> thread_blocked_sp = NULL;
+ UNLOCK();
+}
+
+/* GC_call_with_gc_active() has the opposite to GC_do_blocking() */
+/* functionality. It might be called from a user function invoked by */
+/* GC_do_blocking() to temporarily re-enable calling any GC function  */
+/* and/or manipulating pointers to the garbage collected heap. */
+GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
+ void * client_data) {
+ struct GC_activation_frame_s frame;
+ GC_thread me;
+ LOCK(); /* This will block if the world is stopped. */
+ me = GC_lookup_thread_inner(GetCurrentThreadId());
+
+ /* Adjust our stack base value (this could happen unless */
+ /* GC_get_stack_base() was used which returned GC_SUCCESS). */
+ GC_ASSERT(me -> stack_base != NULL);
+ if (me -> stack_base < (ptr_t)(&frame))
+ me -> stack_base = (ptr_t)(&frame);
+
+ if (me -> thread_blocked_sp == NULL) {
+ /* We are not inside GC_do_blocking() - do nothing more. */
+ UNLOCK();
+ return fn(client_data);
+ }
+
+ /* Setup new "frame". */
+ frame.saved_stack_ptr = me -> thread_blocked_sp;
+# ifdef IA64
+ /* This is the same as in GC_call_with_stack_base(). */
+ frame.backing_store_end = GC_save_regs_in_stack();
+ /* Unnecessarily flushes register stack, */
+ /* but that probably doesn't hurt. */
+ frame.saved_backing_store_ptr = me -> backing_store_ptr;
+# endif
+ frame.prev = me -> activation_frame;
+ me -> thread_blocked_sp = NULL;
+ me -> activation_frame = &frame;
+
+ UNLOCK();
+ client_data = fn(client_data);
+ GC_ASSERT(me -> thread_blocked_sp == NULL);
+ GC_ASSERT(me -> activation_frame == &frame);
+
+ /* Restore original "frame". */
+ LOCK();
+ me -> activation_frame = frame.prev;
+# ifdef IA64
+ me -> backing_store_ptr = frame.saved_backing_store_ptr;
+# endif
+ me -> thread_blocked_sp = frame.saved_stack_ptr;
+ UNLOCK();
+
+ return client_data; /* result */
+}
#ifdef GC_PTHREADS
my_max = (int)GC_get_max_thread_index();
for (i = 0; i <= my_max; i++) {
GC_vthread t = dll_thread_table + i;
- if (t -> stack_base != 0
+ if (t -> stack_base != 0 && t -> thread_blocked_sp == NULL
&& t -> id != thread_id) {
GC_suspend((GC_thread)t);
}
for (i = 0; i < THREAD_TABLE_SZ; i++) {
for (t = GC_threads[i]; t != 0; t = t -> next) {
- if (t -> stack_base != 0
+ if (t -> stack_base != 0 && t -> thread_blocked_sp == NULL
&& !KNOWN_FINISHED(t)
&& t -> id != thread_id) {
GC_suspend(t);
DWORD me = GetCurrentThreadId();
if (thread -> stack_base) {
+ struct GC_activation_frame_s *activation_frame =
+ thread -> activation_frame;
if (thread -> id == me) {
+ GC_ASSERT(thread -> thread_blocked_sp == NULL);
sp = (ptr_t) &dummy;
- } else {
+ } else if ((sp = thread -> thread_blocked_sp) == NULL) {
+ /* Use saved sp value for blocked threads. */
+ /* For unblocked threads call GetThreadContext(). */
CONTEXT context;
context.ContextFlags = CONTEXT_INTEGER|CONTEXT_CONTROL;
if (!GetThreadContext(THREAD_HANDLE(thread), &context))
/* taking advantage of the old value to avoid slow traversals */
/* of large stacks. */
if (thread -> last_stack_min == ADDR_LIMIT) {
- stack_min = GC_get_stack_min(thread -> stack_base);
+ stack_min = GC_get_stack_min(activation_frame != NULL ?
+ (ptr_t)activation_frame : thread -> stack_base);
UNPROTECT(thread);
thread -> last_stack_min = stack_min;
} else {
+ /* First, adjust the latest known minimum stack address if we */
+ /* are inside GC_call_with_gc_active(). */
+ if (activation_frame != NULL &&
+ thread -> last_stack_min > (ptr_t)activation_frame) {
+ UNPROTECT(thread);
+ thread -> last_stack_min = (ptr_t)activation_frame;
+ }
+
if (sp < thread -> stack_base && sp >= thread -> last_stack_min) {
stack_min = sp;
} else {
# ifdef MSWINCE
- stack_min = GC_get_stack_min(thread -> stack_base);
+ stack_min = GC_get_stack_min(activation_frame != NULL ?
+ (ptr_t)activation_frame : thread -> stack_base);
# else
- if (GC_may_be_in_stack(thread -> last_stack_min)) {
- stack_min = GC_get_stack_min(thread -> last_stack_min);
+ /* In the current thread it is always safe to use sp value. */
+ if (GC_may_be_in_stack(thread -> id == me &&
+ sp < thread -> last_stack_min ?
+ sp : thread -> last_stack_min)) {
+ stack_min = last_info.BaseAddress;
+ /* Do not probe rest of the stack if sp is correct. */
+ if (sp < stack_min || sp >= thread->stack_base)
+ stack_min = GC_get_stack_min(thread -> last_stack_min);
} else {
/* Stack shrunk? Is this possible? */
stack_min = GC_get_stack_min(thread -> stack_base);
GC_printf("Pushing stack for 0x%x from sp %p to %p from 0x%x\n",
(int)thread -> id, sp, thread -> stack_base, (int)me);
# endif
- GC_push_all_stack(sp, thread->stack_base);
+ GC_push_all_stack_frames(sp, thread->stack_base, activation_frame);
} else {
/* If not current thread then it is possible for sp to point to */
/* the guarded (untouched yet) page just below the current */
(int)thread -> id, stack_min,
thread -> stack_base, (int)me);
# endif
+ /* Push everything - ignore activation "frames" data. */
GC_push_all_stack(stack_min, thread->stack_base);
}
} /* thread looks live */