/* collections to amortize the collection cost. */
static word min_bytes_allocd(void)
{
- volatile int dummy;
- /* GC_stackbottom is used only for a single-threaded case. */
# ifdef STACK_GROWS_UP
- word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
+ word stack_size = GC_approx_sp() - GC_stackbottom;
+ /* GC_stackbottom is used only for a single-threaded case. */
# else
- word stack_size = GC_stackbottom - (ptr_t)(&dummy);
+ word stack_size = GC_stackbottom - GC_approx_sp();
# endif
word total_root_size; /* includes double stack size, */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
unsigned i;
- volatile int dummy;
# ifndef SMALL_CONFIG
CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
CLOCK_TYPE current_time;
START_WORLD();
return(FALSE);
}
- if (GC_mark_some((ptr_t)(&dummy))) break;
+ if (GC_mark_some(GC_approx_sp())) break;
}
GC_gc_no++;
if ((word)curr_base < (word)limit)
GC_add_roots_inner(curr_base, limit, TRUE);
# else
- volatile int dummy;
char * stack_top
- = (char *) ((word)(&dummy) & ~(GC_sysinfo.dwAllocationGranularity-1));
+ = (char *)((word)GC_approx_sp() &
+ ~(GC_sysinfo.dwAllocationGranularity - 1));
if (base == limit) return;
if ((word)limit > (word)stack_top
void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
volatile word dummy[CLEAR_SIZE];
- /* volatile prevents the following condition to */
- /* be 'optimized' to TRUE constant. */
- BZERO((/* no volatile */ void *)dummy, sizeof(dummy));
- if ((word)(&dummy[0]) COOLER_THAN (word)limit) {
+ dummy[0] = (word)(&dummy);
+ /* Store to a volatile variable prevents the following */
+ /* condition to be 'optimized' to TRUE constant. */
+ BZERO((/* no volatile */ void *)&dummy[1], (CLEAR_SIZE-1) * sizeof(word));
+ if (dummy[0] COOLER_THAN (word)limit) {
(void) GC_clear_stack_inner(arg, limit);
}
/* Make sure the recursive call is not a tail call, and the bzero */
GC_API void GC_CALL GC_init(void)
{
/* LOCK(); -- no longer does anything this early. */
-# if !defined(THREADS) && defined(GC_ASSERTIONS)
- volatile int dummy;
-# endif
word initial_heap_sz;
IF_CANCEL(int cancel_state;)
GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
# ifndef THREADS
- GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)(&dummy)));
+ GC_ASSERT(!((word)GC_stackbottom HOTTER_THAN (word)GC_approx_sp()));
# endif
# if !defined(_AUX_SOURCE) || defined(__GNUC__)
GC_STATIC_ASSERT((word)(-1) > (word)0);
GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
{
- volatile int dummy;
struct GC_stack_base base;
void *result;
- base.mem_base = (void *)&dummy;
+ base.mem_base = (void *)&base;
# ifdef IA64
base.reg_base = (void *)GC_save_regs_in_stack();
/* Unnecessarily flushes register stack, */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
{
- volatile int dummy;
- ptr_t trunc_sp = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
+ ptr_t trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
/* FIXME: This won't work if called from a deeply recursive */
/* client code (and the committed stack space has grown). */
word size = GC_get_writable_length(trunc_sp, 0);
ptr_t GC_get_main_stack_base(void)
{
-# if defined(GC_ASSERTIONS) \
- || (!defined(STACKBOTTOM) \
- && (defined(HEURISTIC1) || defined(HEURISTIC2)))
- volatile int dummy;
-# endif
ptr_t result;
# if defined(LINUX) && !defined(NACL) \
&& (defined(USE_GET_STACKBASE_FOR_MAIN) \
# define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
# ifdef HEURISTIC1
# ifdef STACK_GROWS_DOWN
- result = (ptr_t)((((word)(&dummy)) + STACKBOTTOM_ALIGNMENT_M1)
+ result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
& ~STACKBOTTOM_ALIGNMENT_M1);
# else
- result = (ptr_t)(((word)(&dummy)) & ~STACKBOTTOM_ALIGNMENT_M1);
+ result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
# endif
# endif /* HEURISTIC1 */
# ifdef LINUX_STACKBOTTOM
result = GC_freebsd_main_stack_base();
# endif
# ifdef HEURISTIC2
-# ifdef STACK_GROWS_DOWN
- result = GC_find_limit((ptr_t)(&dummy), TRUE);
-# ifdef HEURISTIC2_LIMIT
- if ((word)result > (word)HEURISTIC2_LIMIT
- && (word)(&dummy) < (word)HEURISTIC2_LIMIT) {
- result = HEURISTIC2_LIMIT;
- }
-# endif
-# else
- result = GC_find_limit((ptr_t)(&dummy), FALSE);
-# ifdef HEURISTIC2_LIMIT
- if ((word)result < (word)HEURISTIC2_LIMIT
- && (word)(&dummy) > (word)HEURISTIC2_LIMIT) {
- result = HEURISTIC2_LIMIT;
- }
+ {
+ ptr_t sp = GC_approx_sp();
+# ifdef STACK_GROWS_DOWN
+ result = GC_find_limit(sp, TRUE);
+# ifdef HEURISTIC2_LIMIT
+ if ((word)result > (word)HEURISTIC2_LIMIT
+ && (word)sp < (word)HEURISTIC2_LIMIT) {
+ result = HEURISTIC2_LIMIT;
+ }
+# endif
+# else
+ result = GC_find_limit(sp, FALSE);
+# ifdef HEURISTIC2_LIMIT
+ if ((word)result < (word)HEURISTIC2_LIMIT
+ && (word)sp > (word)HEURISTIC2_LIMIT) {
+ result = HEURISTIC2_LIMIT;
+ }
+# endif
# endif
-# endif
+ }
# endif /* HEURISTIC2 */
# ifdef STACK_GROWS_DOWN
if (result == 0)
result = (ptr_t)(signed_word)(-sizeof(ptr_t));
# endif
# endif
- GC_ASSERT((word)(&dummy) HOTTER_THAN (word)result);
+ GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)result);
return(result);
}
# define GET_MAIN_STACKBASE_SPECIAL
/* Darwin variant: obtain the current thread's stack bottom directly   */
/* from the pthread API and store it in *b.  Always succeeds.          */
/* NOTE(review): this hunk drops the on-stack `dummy` local and uses   */
/* GC_approx_sp() in the assertion instead, so the stack-pointer       */
/* approximation is centralized in one helper.                         */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
-# ifdef GC_ASSERTIONS
-  volatile int dummy;
-# endif
  /* pthread_get_stackaddr_np() should return stack bottom (highest */
  /* stack address plus 1). */
  b->mem_base = pthread_get_stackaddr_np(pthread_self());
  /* Sanity check: the current (approximate) stack pointer must be    */
  /* "hotter" than (i.e. on the growth side of) the reported bottom.  */
-  GC_ASSERT((word)(&dummy) HOTTER_THAN (word)b->mem_base);
+  GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)b->mem_base);
  return GC_SUCCESS;
}
# define HAVE_GET_STACK_BASE
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
-# ifdef GC_ASSERTIONS
- volatile int dummy;
-# endif
stack_t s;
pthread_t self = pthread_self();
ABORT("thr_stksegment failed");
}
/* s.ss_sp holds the pointer to the stack bottom. */
- GC_ASSERT((word)(&dummy) HOTTER_THAN (word)s.ss_sp);
+ GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)s.ss_sp);
if (!stackbase_main_self && thr_main() != 0)
{
/* FIXME - Implement better strategies here. */
GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
{
- volatile int dummy;
IF_CANCEL(int cancel_state;)
DCL_LOCK_STATE;
LOCK();
DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
# ifdef STACK_GROWS_DOWN
- b -> mem_base = GC_find_limit((ptr_t)(&dummy), TRUE);
+ b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
# ifdef IA64
b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
# endif
# else
- b -> mem_base = GC_find_limit((ptr_t)(&dummy), FALSE);
+ b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
# endif
RESTORE_CANCEL(cancel_state);
UNLOCK();
/* This is always called from the main thread. Default implementation. */
/* Delegates to GC_get_stack_base() and aborts if that fails; returns  */
/* the main thread's stack bottom.                                     */
/* NOTE(review): the assertion now uses GC_approx_sp() rather than the */
/* address of a removed `dummy` local — same check, one fewer idiom.   */
ptr_t GC_get_main_stack_base(void)
{
-# ifdef GC_ASSERTIONS
-  volatile int dummy;
-# endif
  struct GC_stack_base sb;
  if (GC_get_stack_base(&sb) != GC_SUCCESS)
    ABORT("GC_get_stack_base failed");
  /* The current stack pointer must lie on the growth side of the */
  /* reported stack bottom, else the result is unusable.          */
-  GC_ASSERT((word)(&dummy) HOTTER_THAN (word)sb.mem_base);
+  GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)sb.mem_base);
  return (ptr_t)sb.mem_base;
}
#endif /* !GET_MAIN_STACKBASE_SPECIAL */
/* fail than the old code, which had no reported failures. Thus we */
/* leave it this way while we think of something better, or support */
/* GC_test_and_set on the remaining platforms. */
- static volatile word currently_updating = 0;
+ static int * volatile currently_updating = 0;
static void async_set_pht_entry_from_index(volatile page_hash_table db,
size_t index)
{
- volatile int update_dummy;
- currently_updating = (word)(&update_dummy);
+ int update_dummy;
+ currently_updating = &update_dummy;
set_pht_entry_from_index(db, index);
/* If we get contention in the 10 or so instruction window here, */
/* and we get stopped by a GC between the two updates, we lose! */
- if (currently_updating != (word)(&update_dummy)) {
+ if (currently_updating != &update_dummy) {
set_pht_entry_from_index_safe(db, index);
/* We claim that if two threads concurrently try to update the */
/* dirty bit vector, the first one to execute UPDATE_START */
STATIC void GC_suspend_handler_inner(ptr_t sig_arg,
void * context GC_ATTR_UNUSED)
{
-# ifndef SPARC
- volatile int dummy;
-# endif
pthread_t self = pthread_self();
GC_thread me;
IF_CANCEL(int cancel_state;)
# ifdef SPARC
me -> stop_info.stack_ptr = GC_save_regs_in_stack();
# else
- me -> stop_info.stack_ptr = (ptr_t)(&dummy);
+ me -> stop_info.stack_ptr = GC_approx_sp();
# endif
# ifdef IA64
me -> backing_store_ptr = GC_save_regs_in_stack();
/* NaCl: called before a syscall; if this thread is registered with    */
/* the GC (GC_nacl_thread_idx != -1), save its registers, record an    */
/* approximate stack pointer, and mark the thread as parked so the     */
/* collector may scan it while the syscall is in progress.             */
GC_API_OSCALL void nacl_pre_syscall_hook(void)
{
-  volatile int dummy;
-
  if (GC_nacl_thread_idx != -1) {
    NACL_STORE_REGS();
    /* GC_approx_sp() replaces the old &dummy trick for the saved sp. */
-    GC_nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&dummy);
+    GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
  }
}
GC_API_OSCALL void __nacl_suspend_thread_if_needed(void)
{
- volatile int dummy;
-
if (GC_nacl_park_threads_now) {
pthread_t self = pthread_self();
/* so don't bother storing registers again, the GC has a set. */
if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
NACL_STORE_REGS();
- GC_nacl_gc_thread_self->stop_info.stack_ptr = (ptr_t)(&dummy);
+ GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
}
GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
while (GC_nacl_park_threads_now) {
/* We hold the allocation lock. */
GC_INNER void GC_thr_init(void)
{
-# ifndef GC_DARWIN_THREADS
- volatile int dummy;
-# endif
if (GC_thr_initialized) return;
GC_thr_initialized = TRUE;
# ifdef GC_DARWIN_THREADS
t -> stop_info.mach_thread = mach_thread_self();
# else
- t -> stop_info.stack_ptr = (ptr_t)(&dummy);
+ t -> stop_info.stack_ptr = GC_approx_sp();
# endif
t -> flags = DETACHED | MAIN_THREAD;
}
/* Could p be a stack address? */
/* Returns TRUE iff p lies between the current (approximate) stack     */
/* pointer and GC_stackbottom, with the comparison direction selected  */
/* by STACK_GROWS_DOWN.  Conservative: a TRUE answer only means p is   */
/* within the active stack region, not that it points at a live local. */
STATIC GC_bool GC_on_stack(ptr_t p)
{
-  volatile int dummy;
-# ifdef STACK_GROWS_DOWN
-    if ((word)p >= (word)(&dummy) && (word)p < (word)GC_stackbottom) {
-      return(TRUE);
-    }
-# else
-    if ((word)p <= (word)(&dummy) && (word)p > (word)GC_stackbottom) {
-      return(TRUE);
-    }
-# endif
-    return(FALSE);
+# ifdef STACK_GROWS_DOWN
+    /* Downward growth: stack occupies [sp, GC_stackbottom). */
+    if ((word)p >= (word)GC_approx_sp()
+        && (word)p < (word)GC_stackbottom) {
+      return(TRUE);
+    }
+# else
+    /* Upward growth: stack occupies (GC_stackbottom, sp]. */
+    if ((word)p <= (word)GC_approx_sp()
+        && (word)p > (word)GC_stackbottom) {
+      return(TRUE);
+    }
+# endif
+    return(FALSE);
}
#endif
int main(void)
{
- volatile int dummy;
+ volatile word sp;
long ps = GETPAGESIZE();
jmp_buf b;
register int x = (int)strlen("a"); /* 1, slightly disguised */
static int y = 0;
+ sp = (word)(&sp);
printf("This appears to be a %s running %s\n", MACH_TYPE, OS_TYPE);
- if ((word)nested_sp() < (word)(&dummy)) {
+ if ((word)nested_sp() < sp) {
printf("Stack appears to grow down, which is the default.\n");
printf("A good guess for STACKBOTTOM on this machine is 0x%lx.\n",
- ((unsigned long)(word)(&dummy) + ps) & ~(ps-1));
+ ((unsigned long)sp + ps) & ~(ps-1));
} else {
printf("Stack appears to grow up.\n");
printf("Define STACK_GROWS_UP in gc_private.h\n");
printf("A good guess for STACKBOTTOM on this machine is 0x%lx.\n",
- ((unsigned long)(word)(&dummy) + ps) & ~(ps-1));
+ ((unsigned long)sp + ps) & ~(ps-1));
}
printf("Note that this may vary between machines of ostensibly\n");
printf("the same architecture (e.g. Sun 3/50s and 3/80s).\n");
STATIC word GC_push_stack_for(GC_thread thread, DWORD me)
{
- volatile int dummy;
ptr_t sp, stack_min;
struct GC_traced_stack_sect_s *traced_stack_sect =
thread -> traced_stack_sect;
if (thread -> id == me) {
GC_ASSERT(thread -> thread_blocked_sp == NULL);
- sp = (ptr_t)(&dummy);
+ sp = GC_approx_sp();
} else if ((sp = thread -> thread_blocked_sp) == NULL) {
/* Use saved sp value for blocked threads. */
/* For unblocked threads call GetThreadContext(). */