case CEE_RET: {
link_bblocks = FALSE;
MonoType *ult = mini_type_get_underlying_type (signature->ret);
- if (ult->type != MONO_TYPE_VOID) {
+ mt = mint_type (ult);
+ if (mt != MINT_TYPE_VOID) {
// Convert stack contents to return type if necessary
CHECK_STACK (td, 1);
emit_convert (td, td->sp - 1, ult);
}
int vt_size = 0;
- if (ult->type != MONO_TYPE_VOID) {
+ if (mt != MINT_TYPE_VOID) {
--td->sp;
- if (mint_type (ult) == MINT_TYPE_VT)
+ if (mt == MINT_TYPE_VT)
vt_size = mono_class_value_size (mono_class_from_mono_type_internal (ult), NULL);
}
if (td->sp > td->stack) {
exit_profiling |= PROFILING_FLAG;
if (exit_profiling) {
/* This does the return as well */
- gboolean is_void = ult->type == MONO_TYPE_VOID;
+ gboolean is_void = mt == MINT_TYPE_VOID;
interp_add_ins (td, is_void ? MINT_PROF_EXIT_VOID : MINT_PROF_EXIT);
td->last_ins->data [0] = exit_profiling;
if (!is_void) {
}
} else {
if (vt_size == 0) {
- if (ult->type == MONO_TYPE_VOID) {
+ if (mt == MINT_TYPE_VOID) {
interp_add_ins (td, MINT_RET_VOID);
} else {
- interp_add_ins (td, MINT_RET);
+ if (mt == MINT_TYPE_I1)
+ interp_add_ins (td, MINT_RET_I1);
+ else if (mt == MINT_TYPE_U1)
+ interp_add_ins (td, MINT_RET_U1);
+ else if (mt == MINT_TYPE_I2)
+ interp_add_ins (td, MINT_RET_I2);
+ else if (mt == MINT_TYPE_U2)
+ interp_add_ins (td, MINT_RET_U2);
+ else
+ interp_add_ins (td, MINT_RET);
interp_ins_set_sreg (td->last_ins, td->sp [0].local);
}
} else {
INTERP_FOLD_UNOP (MINT_NOT_I8, LOCAL_VALUE_I8, l, ~);
INTERP_FOLD_UNOP (MINT_CEQ0_I4, LOCAL_VALUE_I4, i, 0 ==);
- // MOV's are just a copy, if the contents of sreg are known
- INTERP_FOLD_CONV (MINT_MOV_I1, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
- INTERP_FOLD_CONV (MINT_MOV_U1, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
- INTERP_FOLD_CONV (MINT_MOV_I2, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
- INTERP_FOLD_CONV (MINT_MOV_U2, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint32);
-
INTERP_FOLD_CONV (MINT_CONV_I1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, gint8);
INTERP_FOLD_CONV (MINT_CONV_I1_I8, LOCAL_VALUE_I4, i, LOCAL_VALUE_I8, l, gint8);
INTERP_FOLD_CONV (MINT_CONV_U1_I4, LOCAL_VALUE_I4, i, LOCAL_VALUE_I4, i, guint8);
local_defs [dreg].def_index = ins_index;
}
+ // We always store to the full i4, except as part of STIND opcodes. These opcodes can be
+ // applied to a local var only if that var has LDLOCA applied to it
+ if ((opcode >= MINT_MOV_I1 && opcode <= MINT_MOV_U2) && !td->locals [sregs [0]].indirects) {
+ ins->opcode = MINT_MOV_4;
+ opcode = MINT_MOV_4;
+ }
+
if (opcode == MINT_MOV_4 || opcode == MINT_MOV_8 || opcode == MINT_MOV_VT) {
int sreg = sregs [0];
if (dreg == sreg) {
local_defs [dreg].type = LOCAL_VALUE_I4;
local_defs [dreg].i = (gint32)td->data_items [ins->data [0]];
#endif
- } else if (MINT_IS_UNOP (opcode) || (opcode >= MINT_MOV_I1 && opcode <= MINT_MOV_U2)) {
+ } else if (MINT_IS_UNOP (opcode)) {
ins = interp_fold_unop (td, local_defs, ins);
} else if (MINT_IS_UNOP_CONDITIONAL_BRANCH (opcode)) {
ins = interp_fold_unop_cond_br (td, bb, local_defs, ins);
}
static gboolean
-get_sreg_imm (TransformData *td, int sreg, gint16 *imm)
+get_sreg_imm (TransformData *td, int sreg, gint16 *imm, int result_mt)
{
InterpInst *def = td->locals [sreg].def;
if (def != NULL && td->local_ref_count [sreg] == 1) {
ct = interp_get_const_from_ldc_i8 (def);
else
return FALSE;
- if (ct >= G_MININT16 && ct <= G_MAXINT16) {
+ gint64 min_val, max_val;
+ // We only propagate the immediate if it fits into the desired type,
+ // so we don't accidentally handle conversions wrong
+ switch (result_mt) {
+ case MINT_TYPE_I1:
+ min_val = G_MININT8;
+ max_val = G_MAXINT8;
+ break;
+ case MINT_TYPE_I2:
+ min_val = G_MININT16;
+ max_val = G_MAXINT16;
+ break;
+ case MINT_TYPE_U1:
+ min_val = 0;
+ max_val = G_MAXUINT8;
+ break;
+ case MINT_TYPE_U2:
+ min_val = 0;
+ max_val = G_MAXINT16;
+ break;
+ default:
+ g_assert_not_reached ();
+
+ }
+ if (ct >= min_val && ct <= max_val) {
*imm = (gint16)ct;
mono_interp_stats.super_instructions++;
return TRUE;
if (mono_interp_op_dregs [opcode] && !(td->locals [ins->dreg].flags & INTERP_LOCAL_FLAG_GLOBAL))
td->locals [ins->dreg].def = ins;
- if (opcode == MINT_RET) {
+ if (opcode == MINT_RET || (opcode >= MINT_RET_I1 && opcode <= MINT_RET_U2)) {
// ldc + ret -> ret.imm
int sreg = ins->sregs [0];
gint16 imm;
- if (get_sreg_imm (td, sreg, &imm)) {
+ if (get_sreg_imm (td, sreg, &imm, (opcode == MINT_RET) ? MINT_TYPE_I2 : opcode - MINT_RET_I1)) {
InterpInst *def = td->locals [sreg].def;
int ret_op = MINT_IS_LDC_I4 (def->opcode) ? MINT_RET_I4_IMM : MINT_RET_I8_IMM;
InterpInst *new_inst = interp_insert_ins (td, ins, ret_op);
int sreg = -1;
int sreg_imm = -1;
gint16 imm;
- if (get_sreg_imm (td, ins->sregs [0], &imm)) {
+ if (get_sreg_imm (td, ins->sregs [0], &imm, MINT_TYPE_I2)) {
sreg = ins->sregs [1];
sreg_imm = ins->sregs [0];
- } else if (get_sreg_imm (td, ins->sregs [1], &imm)) {
+ } else if (get_sreg_imm (td, ins->sregs [1], &imm, MINT_TYPE_I2)) {
sreg = ins->sregs [0];
sreg_imm = ins->sregs [1];
}
// ldc + sub -> add.-imm
gint16 imm;
int sreg_imm = ins->sregs [1];
- if (get_sreg_imm (td, sreg_imm, &imm) && imm != G_MININT16) {
+ if (get_sreg_imm (td, sreg_imm, &imm, MINT_TYPE_I2) && imm != G_MININT16) {
int add_op = opcode == MINT_SUB_I4 ? MINT_ADD_I4_IMM : MINT_ADD_I8_IMM;
InterpInst *new_inst = interp_insert_ins (td, ins, add_op);
new_inst->dreg = ins->dreg;
// ldc + sh -> sh.imm
gint16 imm;
int sreg_imm = ins->sregs [1];
- if (get_sreg_imm (td, sreg_imm, &imm)) {
+ if (get_sreg_imm (td, sreg_imm, &imm, MINT_TYPE_I2)) {
int shift_op = MINT_SHR_UN_I4_IMM + (opcode - MINT_SHR_UN_I4);
InterpInst *new_inst = interp_insert_ins (td, ins, shift_op);
new_inst->dreg = ins->dreg;
} else if (MINT_IS_BINOP_CONDITIONAL_BRANCH (opcode) && is_short_offset (noe, ins->info.target_bb->native_offset_estimate)) {
gint16 imm;
int sreg_imm = ins->sregs [1];
- if (get_sreg_imm (td, sreg_imm, &imm)) {
+ if (get_sreg_imm (td, sreg_imm, &imm, MINT_TYPE_I2)) {
int condbr_op = get_binop_condbr_imm_sp (opcode);
if (condbr_op != MINT_NOP) {
InterpInst *prev_ins = interp_prev_ins (ins);
for (InterpBasicBlock *bb = td->entry_bb; bb != NULL; bb = bb->next_bb) {
InterpInst *ins = bb->first_ins;
while (ins) {
- if (ins->opcode >= MINT_RET && ins->opcode <= MINT_RET_VT)
+ if (ins->opcode >= MINT_RET && ins->opcode <= MINT_RET_VT) {
ins->opcode += MINT_RET_LOCALLOC - MINT_RET;
+ } else if (ins->opcode >= MINT_RET_I1 && ins->opcode <= MINT_RET_U2) {
+ int mt = ins->opcode - MINT_RET_I1;
+ int var = ins->sregs [0];
+ int opcode;
+ switch (mt) {
+ case MINT_TYPE_I1: opcode = MINT_CONV_I1_I4; break;
+ case MINT_TYPE_U1: opcode = MINT_CONV_U1_I4; break;
+ case MINT_TYPE_I2: opcode = MINT_CONV_I2_I4; break;
+ case MINT_TYPE_U2: opcode = MINT_CONV_U2_I4; break;
+ default: g_assert_not_reached ();
+ }
+ // This path should be rare enough not to bother with specific opcodes
+ // Add implicit conversion and then return
+ interp_clear_ins (ins);
+ ins = interp_insert_ins (td, ins, opcode);
+ interp_ins_set_dreg (ins, var);
+ interp_ins_set_sreg (ins, var);
+
+ ins = interp_insert_ins (td, ins, MINT_RET_LOCALLOC);
+ interp_ins_set_dreg (ins, var);
+ interp_ins_set_sreg (ins, var);
+ }
ins = ins->next;
}
}