DEFINE_THREAD_VP_OFFSET (fp);
DEFINE_THREAD_VP_OFFSET (sp);
DEFINE_THREAD_VP_OFFSET (ip);
-DEFINE_THREAD_VP_OFFSET (sp_min_since_gc);
DEFINE_THREAD_VP_OFFSET (stack_limit);
/* The current scm_thread*. Preserved across callouts. */
static void
emit_alloc_frame_for_sp (scm_jit_state *j, jit_gpr_t t)
{
- jit_reloc_t k, fast, watermark;
+ jit_reloc_t k, fast;
uint32_t saved_state = save_reloadable_register_state (j);
ASSERT_HAS_REGISTER_STATE (SP_IN_REGISTER);
- emit_ldxi (j, t, THREAD, thread_offset_sp_min_since_gc);
- fast = jit_bger (j->jit, SP, t);
emit_ldxi (j, t, THREAD, thread_offset_stack_limit);
- watermark = jit_bger (j->jit, SP, t);
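+ /* One comparison against stack_limit now picks the path: SP at or
+    above the limit means the frame fits. */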
+ fast = jit_bger (j->jit, SP, t);
/* Slow case: call out to expand stack. */
emit_store_current_ip (j, t);
restore_reloadable_register_state (j, saved_state);
k = jit_jmp (j->jit);
- /* Past sp_min_since_gc, but within stack_limit: update watermark and
- fall through. */
- jit_patch_here (j->jit, watermark);
- jit_stxi (j->jit, thread_offset_sp_min_since_gc, THREAD, SP);
- jit_patch_here (j->jit, fast);
/* Fast case: Just update sp. */
+ jit_patch_here (j->jit, fast);
emit_store_sp (j);
+
jit_patch_here (j->jit, k);
clear_register_state (j, SP_CACHE_GPR | SP_CACHE_FPR);
#define ALLOC_FRAME(n) \
do { \
sp = VP->fp - (n); \
- if (sp < VP->sp_min_since_gc) \
+ if (SCM_UNLIKELY (sp < VP->stack_limit)) \
{ \
- if (SCM_UNLIKELY (sp < VP->stack_limit)) \
- { \
- SYNC_IP (); \
- CALL_INTRINSIC (expand_stack, (thread, sp)); \
- CACHE_SP (); \
- } \
- else \
- VP->sp_min_since_gc = VP->sp = sp; \
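+ /* Out of room: sync the IP, call out to grow the stack, then \
+    reload sp, which expansion may have moved. */ \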
+ SYNC_IP (); \
+ CALL_INTRINSIC (expand_stack, (thread, sp)); \
+ CACHE_SP (); \
} \
else \
VP->sp = sp; \
stack expansion is needed. Note that in some cases this may lower
SP, e.g. after a return but where there are more locals below, but we
know it was preceded by an alloc-frame in that case, so no stack need
- be allocated.
-
- As an optimization, we don't update sp_min_since_gc in this case; the
- principal place stacks are expanded is in ALLOC_FRAME. It doesn't
- need to strictly be the min since GC, as it's just an optimization to
- prevent passing too-large of a range to madvise. */
+ be allocated. */
#define RESET_FRAME(n) \
do { \
VP->sp = sp = VP->fp - (n); \
vm_increase_sp (struct scm_vm *vp, union scm_vm_stack_element *new_sp,
enum vm_increase_sp_kind kind)
{
- if (new_sp >= vp->sp_min_since_gc)
- {
- vp->sp = new_sp;
- return;
- }
-
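+ /* Only a push can take sp below the limit and need expansion; a pop
+    never grows the stack. */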
if (kind == VM_SP_PUSH && new_sp < vp->stack_limit)
vm_expand_stack (vp, new_sp);
else
- vp->sp_min_since_gc = vp->sp = new_sp;
+ vp->sp = new_sp;
}
static inline void
vp->overflow_handler_stack = SCM_EOL;
vp->ip = NULL;
vp->sp = vp->stack_top;
- vp->sp_min_since_gc = vp->sp;
vp->fp = vp->stack_top;
vp->compare_result = SCM_F_COMPARE_NONE;
vp->engine = vm_default_engine;
#if HAVE_SYS_MMAN_H
uintptr_t lo = (uintptr_t) vp->stack_bottom;
uintptr_t hi = (uintptr_t) vp->sp;
- /* The second condition is needed to protect against wrap-around. */
- if (vp->sp_min_since_gc >= vp->stack_bottom && vp->sp >= vp->sp_min_since_gc)
- lo = (uintptr_t) vp->sp_min_since_gc;
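+ /* Without the watermark, the returned region is always the whole
+    range below sp. */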
lo &= ~(page_size - 1U); /* round down */
hi &= ~(page_size - 1U); /* round down */
if (ret && errno != ENOSYS)
perror ("madvise failed");
}
-
- vp->sp_min_since_gc = vp->sp;
#endif
}
new_sp = data.new_sp;
}
- vp->sp_min_since_gc = vp->sp = new_sp;
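+ /* Commit the possibly-relocated sp before checking whether an
+    overflow handler should run. */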
+ vp->sp = new_sp;
if (should_handle_stack_overflow (vp, stack_size))
{
{
union scm_vm_stack_element *sp = thread->vm.fp - nlocals;
- if (sp < thread->vm.sp_min_since_gc)
- {
- if (SCM_UNLIKELY (sp < thread->vm.stack_limit))
- thread_expand_stack (thread, sp);
- else
- thread->vm.sp_min_since_gc = thread->vm.sp = sp;
- }
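+ /* Same shape as the interpreter's ALLOC_FRAME: expand only when the
+    new sp crosses the limit. */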
+ if (SCM_UNLIKELY (sp < thread->vm.stack_limit))
+ thread_expand_stack (thread, sp);
else
thread->vm.sp = sp;
}
scm_dynwind_unwind_handler (unwind_overflow_handler, &data,
SCM_F_WIND_EXPLICITLY);
- /* Reset sp_min_since_gc so that the VM checks actually trigger. */
- return_unused_stack_to_os (&t->vm);
-
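+ /* With the watermark gone there is no fast path that skips the limit
+    check, so the VM's overflow checks trigger without any reset here. */
+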
ret = scm_call_0 (thunk);
scm_dynwind_end ();
uint32_t *ip; /* instruction pointer */
union scm_vm_stack_element *sp; /* stack pointer */
union scm_vm_stack_element *fp; /* frame pointer */
- union scm_vm_stack_element *sp_min_since_gc; /* deepest sp since last gc */
union scm_vm_stack_element *stack_limit; /* stack limit address */
uint8_t compare_result; /* flags register: a value from scm_compare */
uint8_t apply_hook_enabled; /* if apply hook is enabled */