stack_size, n, (td)->ip - (td)->il_code); \
} while (0)
+#define ENSURE_STACK_SIZE(td, size) \
+ do { \
+ if ((size) > (td)->max_stack_size) \
+ (td)->max_stack_size = (size); \
+ } while (0)
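+// Example: a push at stack offset 32 with size 16 results in
+// ENSURE_STACK_SIZE (td, 48), so max_stack_size tracks the deepest
+// execution stack extent reached in the method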
+
#define ENSURE_I4(td, sp_off) \
do { \
- if ((td)->sp [-sp_off].type == STACK_TYPE_I8) { \
+ if ((td)->sp [-(sp_off)].type == STACK_TYPE_I8) { \
/* Same representation in memory, nothing to do */ \
- (td)->sp [-sp_off].type = STACK_TYPE_I4; \
+ (td)->sp [-(sp_off)].type = STACK_TYPE_I4; \
} \
} while (0)
}
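+// Returns the offset, relative to the start of the execution stack, at which
+// the next value will be pushed. E.g. with a single entry of size
+// MINT_STACK_SLOT_SIZE at offset 0, this returns MINT_STACK_SLOT_SIZE.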
static int
-create_interp_stack_local (TransformData *td, int type, MonoClass *k, int type_size)
+get_tos_offset (TransformData *td)
{
- int local = create_interp_local_explicit (td, get_type_from_stack (type, k), type_size);
+ if (td->sp == td->stack)
+ return 0;
+ else
+ return td->sp [-1].offset + td->sp [-1].size;
+}
+
+// Create a local for sp
+static void
+create_interp_stack_local (TransformData *td, StackInfo *sp, int type_size)
+{
+ int local = create_interp_local_explicit (td, get_type_from_stack (sp->type, sp->klass), type_size);
td->locals [local].flags |= INTERP_LOCAL_FLAG_EXECUTION_STACK;
- return local;
+ if (!td->optimized) {
+ td->locals [local].stack_offset = sp->offset;
+ // Account for the additional frame space needed when we don't run the var offset allocator
+ ENSURE_STACK_SIZE(td, sp->offset + sp->size);
+ }
+ sp->local = local;
}
static void
td->sp->type = GINT_TO_UINT8 (type);
td->sp->klass = k;
td->sp->flags = 0;
- td->sp->local = create_interp_stack_local (td, type, k, type_size);
+ td->sp->offset = get_tos_offset (td);
td->sp->size = ALIGN_TO (type_size, MINT_STACK_SLOT_SIZE);
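+ // The slot-aligned size must be set before creating the local, since in
+ // unoptimized mode create_interp_stack_local uses sp->offset and sp->size
+ // to grow the frame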
+ create_interp_stack_local (td, td->sp, type_size);
td->sp++;
}
set_type_and_local (TransformData *td, StackInfo *sp, MonoClass *klass, int type)
{
SET_TYPE (sp, type, klass);
- sp->local = create_interp_stack_local (td, type, NULL, MINT_STACK_SLOT_SIZE);
+ create_interp_stack_local (td, sp, MINT_STACK_SLOT_SIZE);
}
static void
static void
fixup_newbb_stack_locals (TransformData *td, InterpBasicBlock *newbb)
{
+ // If not optimized, it is enough for the vars to have the same offset on the stack; the
+ // sregs and dregs are not required to match.
+ if (!td->optimized)
+ return;
if (newbb->stack_height <= 0)
return;
if (offset < 0 && td->sp == td->stack && !td->inlined_method) {
// Backwards branch inside unoptimized method where the IL stack is empty
// This is candidate for a patchpoint
- if (!td->rtm->optimized)
+ if (!td->optimized)
target_bb->emit_patchpoint = TRUE;
- if (mono_interp_tiering_enabled () && !target_bb->patchpoint_data && td->rtm->optimized) {
+ if (mono_interp_tiering_enabled () && !target_bb->patchpoint_data && td->optimized) {
// The optimized imethod will store mapping from bb index to native offset so it
// can resume execution in the optimized method, once we tier up in patchpoint
td->patchpoint_data_n++;
td->last_ins->data [0] = get_data_item_index (td, target_method);
td->sp -= 2;
- int *call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int));
- call_args [0] = td->sp [0].local;
- call_args [1] = td->sp [1].local;
- call_args [2] = -1;
interp_add_ins (td, MINT_ICALL_PP_V);
interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
- td->last_ins->info.call_args = call_args;
td->last_ins->flags |= INTERP_INST_FLAG_CALL;
+ if (td->optimized) {
+ int *call_args = (int*)mono_mempool_alloc (td->mempool, 3 * sizeof (int));
+ call_args [0] = td->sp [0].local;
+ call_args [1] = td->sp [1].local;
+ call_args [2] = -1;
+ td->last_ins->info.call_args = call_args;
+ } else {
+ // Unoptimized code needs every call to have a dreg for offset allocation,
+ // even if the call is void
+ td->last_ins->dreg = td->sp [0].local;
+ }
}
static void
td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
td->last_ins->info.call_args = NULL;
td->last_ins->flags |= INTERP_INST_FLAG_CALL;
+ if (!td->optimized) {
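+ // Unoptimized code needs every call to have a dreg for offset allocation;
+ // push a dummy value to reserve the slot, then pop it right back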
+ push_simple_type (td, STACK_TYPE_I4);
+ td->sp--;
+ td->last_ins->dreg = td->sp [0].local;
+ }
}
static void
td->last_ins->data [0] = get_data_item_index (td, msg);
td->sp -= 1;
- int *call_args = (int*)mono_mempool_alloc (td->mempool, 2 * sizeof (int));
- call_args [0] = td->sp [0].local;
- call_args [1] = -1;
interp_add_ins (td, MINT_ICALL_P_V);
interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
td->last_ins->data [0] = get_data_item_index (td, (gpointer)info->func);
- td->last_ins->info.call_args = call_args;
td->last_ins->flags |= INTERP_INST_FLAG_CALL;
+ if (td->optimized) {
+ int *call_args = (int*)mono_mempool_alloc (td->mempool, 2 * sizeof (int));
+ call_args [0] = td->sp [0].local;
+ call_args [1] = -1;
+ td->last_ins->info.call_args = call_args;
+ } else {
+ // Unoptimized code needs every call to have a dreg for offset allocation,
+ // even if the call is void
+ td->last_ins->dreg = td->sp [0].local;
+ }
}
static int
g_string_free (str, TRUE);
}
-static G_GNUC_UNUSED void
+static void
dump_interp_bb (InterpBasicBlock *bb)
{
g_print ("BB%d:\n", bb->index);
- for (InterpInst *ins = bb->first_ins; ins != NULL; ins = ins->next)
- dump_interp_inst (ins);
+ for (InterpInst *ins = bb->first_ins; ins != NULL; ins = ins->next) {
+ // Skip nops and sequence points to avoid noise
+ if (ins->opcode != MINT_NOP && ins->opcode != MINT_IL_SEQ_POINT)
+ dump_interp_inst (ins);
+ }
}
void
mono_interp_print_td_code (TransformData *td)
{
- InterpInst *ins = td->first_ins;
-
- char *name = mono_method_full_name (td->method, TRUE);
- g_print ("IR for \"%s\"\n", name);
- g_free (name);
- while (ins) {
- dump_interp_inst (ins);
- ins = ins->next;
- }
+ g_print ("Unoptimized IR:\n");
+ for (InterpBasicBlock *bb = td->entry_bb; bb != NULL; bb = bb->next_bb)
+ dump_interp_bb (bb);
}
else
vtsize = MINT_STACK_SLOT_SIZE;
- dreg = create_interp_stack_local (td, stack_type [ret_mt], klass, vtsize);
+ dreg = create_interp_local (td, get_type_from_stack (stack_type [ret_mt], klass));
// For valuetypes, we need to control the lifetime of the valuetype.
// MINT_NEWOBJ_VT_INLINED takes the address of this reg and we should keep
interp_add_ins (td, MINT_DEF);
interp_ins_set_dreg (td->last_ins, dreg);
} else {
- dreg = create_interp_stack_local (td, stack_type [ret_mt], klass, MINT_STACK_SLOT_SIZE);
+ dreg = create_interp_local (td, get_type_from_stack (stack_type [ret_mt], klass));
}
// Allocate `this` pointer
static int*
create_call_args (TransformData *td, int num_args)
{
+ // We don't need to know the sregs for calls in unoptimized code
+ if (!td->optimized)
+ return NULL;
int *call_args = (int*) mono_mempool_alloc (td->mempool, (num_args + 1) * sizeof (int));
for (int i = 0; i < num_args; i++)
call_args [i] = td->sp [i].local;
calli = FALSE;
native = FALSE;
// The function pointer is passed last, but the wrapper expects it as first argument
- // Switch the arguments
- StackInfo sp_fp = td->sp [-1];
- StackInfo *start = &td->sp [-csignature->param_count - 1];
- memmove (start + 1, start, csignature->param_count * sizeof (StackInfo));
- *start = sp_fp;
+ // Switch the arguments.
+ // In unoptimized code, where the var offset allocator is not used, we have to manually
+ // push the values in the correct order. In optimized code we only need to know which
+ // local holds each execution stack position during compilation, so a memmove of the
+ // StackInfo entries is enough
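+ // E.g. with (arg0, arg1, fp) on the stack, the wrapper expects (fp, arg0, arg1)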
+ if (td->optimized) {
+ StackInfo sp_fp = td->sp [-1];
+ StackInfo *start = &td->sp [-csignature->param_count - 1];
+ memmove (start + 1, start, csignature->param_count * sizeof (StackInfo));
+ *start = sp_fp;
+ } else {
+ int *arg_locals = (int*)mono_mempool_alloc0 (td->mempool, sizeof (int) * csignature->param_count);
+ int fp_local = create_interp_local (td, m_class_get_byval_arg (mono_defaults.int_class));
+ // Pop everything into locals, then push the values back in the correct order
+ store_local (td, fp_local);
+ for (int i = csignature->param_count - 1; i >= 0; i--) {
+ arg_locals [i] = create_interp_local (td, csignature->params [i]);
+ store_local (td, arg_locals [i]);
+ }
+ load_local (td, fp_local);
+ for (int i = 0; i < csignature->param_count; i++)
+ load_local (td, arg_locals [i]);
+ }
// The method we are calling has a different signature
csignature = mono_method_signature_internal (target_method);
td->sp -= num_args;
guint32 params_stack_size = get_stack_size (td->sp, num_args);
- int *call_args = create_call_args (td, num_args);
-
if (is_virtual) {
interp_add_ins (td, MINT_CKNULL);
interp_ins_set_sreg (td->last_ins, td->sp->local);
td->last_ins->data [0] = get_data_item_index_imethod (td, mono_interp_get_imethod (target_method));
td->last_ins->data [1] = GUINT32_TO_UINT16 (params_stack_size);
td->last_ins->flags |= INTERP_INST_FLAG_CALL;
- td->last_ins->info.call_args = call_args;
+
+ if (td->optimized) {
+ int *call_args = create_call_args (td, num_args);
+ td->last_ins->info.call_args = call_args;
+ } else {
+ // Dummy dreg
+ push_simple_type (td, STACK_TYPE_I4);
+ interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
+ td->sp--;
+ }
int in_offset = GPTRDIFF_TO_INT (td->ip - td->il_code);
if (interp_ip_in_cbb (td, in_offset + 5))
if (td->verbose_level) {
char *tmp = mono_disasm_code (NULL, method, td->ip, end);
char *name = mono_method_full_name (method, TRUE);
- g_print ("Method %s, optimized %d, original code:\n", name, rtm->optimized);
+ g_print ("Method %s, optimized %d, original code:\n", name, td->optimized);
g_print ("%s\n", tmp);
g_free (tmp);
g_free (name);
init_bb_stack_state (td, new_bb);
}
link_bblocks = TRUE;
+ // Unoptimized code cannot access the exception object directly from the exvar, so we
+ // need to push it explicitly on the execution stack
+ if (!td->optimized) {
+ int index = td->clause_indexes [in_offset];
+ if (index != -1 && new_bb->stack_height == 1 && header->clauses [index].handler_offset == in_offset) {
+ int exvar = td->clause_vars [index];
+ g_assert (td->stack [0].local == exvar);
+ td->sp--;
+ push_simple_type (td, STACK_TYPE_O);
+ interp_add_ins (td, MINT_MOV_P);
+ interp_ins_set_sreg (td->last_ins, exvar);
+ interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
+ }
+ }
}
td->offset_to_bb [in_offset] = td->cbb;
td->in_start = td->ip;
td->last_ins->flags |= INTERP_INST_FLAG_CALL;
td->last_ins->info.call_args = call_args;
} else if (klass == mono_defaults.string_class) {
- int *call_args = (int*)mono_mempool_alloc (td->mempool, (csignature->param_count + 2) * sizeof (int));
- td->sp -= csignature->param_count;
+ if (!td->optimized) {
+ int tos_offset = get_tos_offset (td);
+ td->sp -= csignature->param_count;
+ guint32 params_stack_size = tos_offset - get_tos_offset (td);
+
+ interp_add_ins (td, MINT_NEWOBJ_STRING_UNOPT);
+ td->last_ins->data [0] = get_data_item_index (td, mono_interp_get_imethod (m));
+ td->last_ins->data [1] = GUINT32_TO_UINT16 (params_stack_size);
+ push_type (td, stack_type [ret_mt], klass);
+ interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
+ } else {
+ int *call_args = (int*)mono_mempool_alloc (td->mempool, (csignature->param_count + 2) * sizeof (int));
+ td->sp -= csignature->param_count;
- // First arg is dummy var, it is null when passed to the ctor
- call_args [0] = create_interp_stack_local (td, stack_type [ret_mt], NULL, MINT_STACK_SLOT_SIZE);
- for (int i = 0; i < csignature->param_count; i++) {
- call_args [i + 1] = td->sp [i].local;
- }
- call_args [csignature->param_count + 1] = -1;
+ // The first arg is a dummy var; it is null when passed to the ctor
+ call_args [0] = create_interp_local (td, get_type_from_stack (stack_type [ret_mt], NULL));
+ for (int i = 0; i < csignature->param_count; i++) {
+ call_args [i + 1] = td->sp [i].local;
+ }
+ call_args [csignature->param_count + 1] = -1;
- interp_add_ins (td, MINT_NEWOBJ_STRING);
- td->last_ins->data [0] = get_data_item_index_imethod (td, mono_interp_get_imethod (m));
- push_type (td, stack_type [ret_mt], klass);
+ interp_add_ins (td, MINT_NEWOBJ_STRING);
+ td->last_ins->data [0] = get_data_item_index_imethod (td, mono_interp_get_imethod (m));
+ push_type (td, stack_type [ret_mt], klass);
- interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
- interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
- td->last_ins->flags |= INTERP_INST_FLAG_CALL;
- td->last_ins->info.call_args = call_args;
+ interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
+ interp_ins_set_sreg (td->last_ins, MINT_CALL_ARGS_SREG);
+ td->last_ins->flags |= INTERP_INST_FLAG_CALL;
+ td->last_ins->info.call_args = call_args;
+ }
} else if (m_class_get_image (klass) == mono_defaults.corlib &&
(!strcmp (m_class_get_name (m->klass), "Span`1") ||
!strcmp (m_class_get_name (m->klass), "ReadOnlySpan`1")) &&
interp_ins_set_sregs2 (td->last_ins, td->sp [0].local, td->sp [1].local);
push_type_vt (td, klass, mono_class_value_size (klass, NULL));
interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
+ } else if (!td->optimized) {
+ int tos = get_tos_offset (td);
+ td->sp -= csignature->param_count;
+ int param_size = tos - get_tos_offset (td);
+
+ interp_add_ins (td, MINT_NEWOBJ_SLOW_UNOPT);
+ td->last_ins->data [0] = get_data_item_index_imethod (td, mono_interp_get_imethod (m));
+ td->last_ins->data [1] = GINT_TO_UINT16 (param_size);
+
+ gboolean is_vt = m_class_is_valuetype (klass);
+ if (is_vt) {
+ int vtsize = mono_class_value_size (klass, NULL);
+ vtsize = ALIGN_TO (vtsize, MINT_STACK_SLOT_SIZE);
+ td->last_ins->data [2] = vtsize;
+ ENSURE_STACK_SIZE(td, (int)(tos + vtsize + MINT_STACK_SLOT_SIZE));
+ if (ret_mt == MINT_TYPE_VT)
+ push_type_vt (td, klass, vtsize);
+ else
+ push_type (td, stack_type [ret_mt], klass);
+ } else {
+ td->last_ins->data [2] = 0;
+ ENSURE_STACK_SIZE(td, (int)(tos + 2 * MINT_STACK_SLOT_SIZE));
+ push_type (td, stack_type [ret_mt], klass);
+ }
+ interp_ins_set_dreg (td->last_ins, td->sp [-1].local);
} else {
td->sp -= csignature->param_count;
mt = mint_type (info->sig->ret);
push_simple_type (td, stack_type [mt]);
dreg = td->sp [-1].local;
+ } else if (!td->optimized) {
+ // Dummy dreg
+ push_simple_type (td, STACK_TYPE_I4);
+ dreg = td->sp [-1].local;
+ td->sp--;
}
if (jit_icall_id == MONO_JIT_ICALL_mono_threads_attach_coop) {
rtm->needs_thread_attach = 1;
+ // Add dummy return value
+ interp_add_ins (td, MINT_LDNULL);
+ interp_ins_set_dreg (td->last_ins, dreg);
} else if (jit_icall_id == MONO_JIT_ICALL_mono_threads_detach_coop) {
g_assert (rtm->needs_thread_attach);
} else {
if (info->sig->ret->type != MONO_TYPE_VOID) {
// Push a dummy coop gc var
+ interp_add_ins (td, MINT_LDNULL);
push_simple_type (td, STACK_TYPE_I);
+ td->last_ins->dreg = td->sp [-1].local;
interp_add_ins (td, MINT_MONO_ENABLE_GCTRANS);
} else {
// Pop the unused gc var
}
}
+static void
+alloc_unopt_global_local (TransformData *td, int local, gpointer data)
+{
+ // Execution stack locals are resolved when we emit the instruction in the code stream,
+ // once all global locals have their offset resolved
+ if (td->locals [local].flags & INTERP_LOCAL_FLAG_EXECUTION_STACK)
+ return;
+ // Check if already resolved
+ if (td->locals [local].offset != -1)
+ return;
+
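+ // Bump-allocate the local at the current end of the global locals area,
+ // keeping total_locals_size aligned to the stack slot size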
+ int offset = td->total_locals_size;
+ int size = td->locals [local].size;
+ td->locals [local].offset = offset;
+ td->total_locals_size = ALIGN_TO (offset + size, MINT_STACK_SLOT_SIZE);
+}
+
static int
get_inst_length (InterpInst *ins)
{
return mono_interp_oplen [ins->opcode];
}
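+// Invoke the callback for every var referenced by ins: each sreg (expanding
+// the -1 terminated call_args array when the sreg is MINT_CALL_ARGS_SREG)
+// and the dreg, if the opcode has one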
+static void
+foreach_local_var (TransformData *td, InterpInst *ins, gpointer data, void (*callback)(TransformData*, int, gpointer))
+{
+ int opcode = ins->opcode;
+ if (mono_interp_op_sregs [opcode]) {
+ for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
+ int sreg = ins->sregs [i];
+
+ if (sreg == MINT_CALL_ARGS_SREG) {
+ int *call_args = ins->info.call_args;
+ if (call_args) {
+ int var = *call_args;
+ while (var != -1) {
+ callback (td, var, data);
+ call_args++;
+ var = *call_args;
+ }
+ }
+ } else {
+ callback (td, sreg, data);
+ }
+ }
+ }
+
+ if (mono_interp_op_dregs [opcode])
+ callback (td, ins->dreg, data);
+}
+
static int
compute_native_offset_estimates (TransformData *td)
{
if (MINT_IS_NOP (opcode))
continue;
noe += get_inst_length (ins);
+ if (!td->optimized)
+ foreach_local_var (td, ins, NULL, alloc_unopt_global_local);
}
}
return noe;
return opcode;
}
+static int
+get_local_offset (TransformData *td, int local)
+{
+ if (td->locals [local].offset != -1)
+ return td->locals [local].offset;
+
+ // FIXME Some vars might end up with an uninitialized offset because they are not declared at all in the code.
+ // This can happen if the bblock declaring the var gets removed while other unreachable bblocks that access
+ // the var are not removed. This limitation is due to bblock removal using the IN count of a bblock,
+ // which doesn't account for cycles.
+ if (td->optimized)
+ return -1;
+
+ // If we use the optimized offset allocator, all locals should have had their offsets already allocated
+ g_assert (!td->optimized);
+ // The only remaining locals to allocate are the ones from the execution stack
+ g_assert (td->locals [local].flags & INTERP_LOCAL_FLAG_EXECUTION_STACK);
+
+ td->locals [local].offset = td->total_locals_size + td->locals [local].stack_offset;
+ return td->locals [local].offset;
+}
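+// The unoptimized frame is thus laid out as the global locals area, at offsets
+// [0, total_locals_size), followed by the IL execution stack area, at offsets
+// [total_locals_size, total_locals_size + max_stack_size), which also holds
+// the call param areas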
static guint16*
emit_compacted_instruction (TransformData *td, guint16* start_ip, InterpInst *ins)
*ip++ = opcode;
if (opcode == MINT_SWITCH) {
int labels = READ32 (&ins->data [0]);
- *ip++ = GINT_TO_UINT16 (td->locals [ins->sregs [0]].offset);
+ *ip++ = GINT_TO_UINT16 (get_local_offset (td, ins->sregs [0]));
// Write number of switch labels
*ip++ = ins->data [0];
*ip++ = ins->data [1];
const int br_offset = GPTRDIFF_TO_INT (start_ip - td->new_code);
gboolean has_imm = opcode >= MINT_BEQ_I4_IMM_SP && opcode <= MINT_BLT_UN_I8_IMM_SP;
for (int i = 0; i < mono_interp_op_sregs [opcode]; i++)
- *ip++ = GINT_TO_UINT16 (td->locals [ins->sregs [i]].offset);
+ *ip++ = GINT_TO_UINT16 (get_local_offset (td, ins->sregs [i]));
if (has_imm)
*ip++ = ins->data [0];
guint16 mt = ins->data [1];
guint16 fsize = ins->data [2];
- int dest_off = td->locals [ins->dreg].offset;
- int src_off = td->locals [ins->sregs [0]].offset + foff;
+ int dest_off = get_local_offset (td, ins->dreg);
+ int src_off = get_local_offset (td, ins->sregs [0]) + foff;
if (mt == MINT_TYPE_VT || fsize)
opcode = MINT_MOV_VT;
else
// actually vars. Resolve their offset
int num_vars = mono_interp_oplen [opcode] - 1;
for (int i = 0; i < num_vars; i++)
- *ip++ = GINT_TO_UINT16 (td->locals [ins->data [i]].offset);
+ *ip++ = GINT_TO_UINT16 (get_local_offset (td, ins->data [i]));
} else {
if (mono_interp_op_dregs [opcode])
- *ip++ = GINT_TO_UINT16 (td->locals [ins->dreg].offset);
+ *ip++ = GINT_TO_UINT16 (get_local_offset (td, ins->dreg));
if (mono_interp_op_sregs [opcode]) {
for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
- if (ins->sregs [i] == MINT_CALL_ARGS_SREG)
- *ip++ = GINT_TO_UINT16 (td->locals [ins->info.call_args [0]].offset);
- else
- *ip++ = GINT_TO_UINT16 (td->locals [ins->sregs [i]].offset);
+ if (ins->sregs [i] == MINT_CALL_ARGS_SREG) {
+ int offset;
+ // In the unoptimized case the return and the start of the param area are always at the
+ // same offset. Use the dreg offset so we don't need to rely on existing call_args.
+ if (td->optimized)
+ offset = get_local_offset (td, ins->info.call_args [0]);
+ else
+ offset = get_local_offset (td, ins->dreg);
+ *ip++ = GINT_TO_UINT16 (offset);
+ } else {
+ *ip++ = GINT_TO_UINT16 (get_local_offset (td, ins->sregs [i]));
+ }
}
} else if (opcode == MINT_LDLOCA_S) {
// This opcode receives a local but it is not viewed as a sreg since we don't load the value
- *ip++ = GINT_TO_UINT16 (td->locals [ins->sregs [0]].offset);
+ *ip++ = GINT_TO_UINT16 (get_local_offset (td, ins->sregs [0]));
}
int left = get_inst_length (ins) - GPTRDIFF_TO_INT(ip - start_ip);
static int
add_patchpoint_data (TransformData *td, int patchpoint_data_index, int native_offset, int key)
{
- if (td->rtm->optimized) {
+ if (td->optimized) {
td->patchpoint_data [patchpoint_data_index++] = key;
td->patchpoint_data [patchpoint_data_index++] = native_offset;
} else {
}
static void
-foreach_local_var (TransformData *td, InterpInst *ins, gpointer data, void (*callback)(TransformData*, int, gpointer))
-{
- int opcode = ins->opcode;
- if (mono_interp_op_sregs [opcode]) {
- for (int i = 0; i < mono_interp_op_sregs [opcode]; i++) {
- int sreg = ins->sregs [i];
-
- if (sreg == MINT_CALL_ARGS_SREG) {
- int *call_args = ins->info.call_args;
- if (call_args) {
- int var = *call_args;
- while (var != -1) {
- callback (td, var, data);
- call_args++;
- var = *call_args;
- }
- }
- } else {
- callback (td, sreg, data);
- }
- }
- }
-
- if (mono_interp_op_dregs [opcode])
- callback (td, ins->dreg, data);
-}
-
-static void
clear_local_defs (TransformData *td, int var, void *data)
{
LocalValue *local_defs = (LocalValue*) data;
gint32 *sregs = &ins->sregs [0];
gint32 dreg = ins->dreg;
- if (td->verbose_level && ins->opcode != MINT_NOP)
+ if (td->verbose_level && ins->opcode != MINT_NOP && ins->opcode != MINT_IL_SEQ_POINT)
dump_interp_inst (ins);
for (int i = 0; i < num_sregs; i++) {
if (td->prof_coverage)
td->coverage_info = mono_profiler_coverage_alloc (method, header->code_size);
- interp_method_compute_offsets (td, rtm, mono_method_signature_internal (method), header, error);
- goto_if_nok (error, exit);
-
if (verbose_method_name) {
const char *name = verbose_method_name;
}
}
+ interp_method_compute_offsets (td, rtm, mono_method_signature_internal (method), header, error);
+ goto_if_nok (error, exit);
+
td->stack = (StackInfo*)g_malloc0 ((header->max_stack + 1) * sizeof (td->stack [0]));
td->stack_capacity = header->max_stack + 1;
td->sp = td->stack;
if (td->has_localloc)
interp_fix_localloc_ret (td);
- if (td->optimized)
- interp_optimize_code (td);
+ if (td->verbose_level)
+ mono_interp_print_td_code (td);
- interp_alloc_offsets (td);
+ if (td->optimized) {
+ interp_optimize_code (td);
+ interp_alloc_offsets (td);
+ }
generate_compacted_code (td);
if (c->flags & MONO_EXCEPTION_CLAUSE_FILTER)
c->data.filter_offset = get_native_offset (td, c->data.filter_offset);
}
- rtm->alloca_size = td->total_locals_size;
- rtm->locals_size = td->param_area_offset;
+ // When optimized (using the var offset allocator), total_locals_size also contains the param area.
+ // When unoptimized, the param area is laid out, in the same order, within the IL execution stack.
+ g_assert (!td->optimized || !td->max_stack_size);
+ rtm->alloca_size = td->total_locals_size + td->max_stack_size;
+ rtm->locals_size = td->optimized ? td->param_area_offset : td->total_locals_size;
rtm->data_items = (gpointer*)mono_mem_manager_alloc0 (td->mem_manager, td->n_data_items * sizeof (td->data_items [0]));
memcpy (rtm->data_items, td->data_items, td->n_data_items * sizeof (td->data_items [0]));