DEF_HELPER_1(sxtb16, i32, i32)
DEF_HELPER_1(uxtb16, i32, i32)
-DEF_HELPER_2(add_setq, i32, i32, i32)
-DEF_HELPER_2(add_saturate, i32, i32, i32)
-DEF_HELPER_2(sub_saturate, i32, i32, i32)
-DEF_HELPER_2(add_usaturate, i32, i32, i32)
-DEF_HELPER_2(sub_usaturate, i32, i32, i32)
-DEF_HELPER_1(double_saturate, i32, s32)
+DEF_HELPER_3(add_setq, i32, env, i32, i32)
+DEF_HELPER_3(add_saturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_saturate, i32, env, i32, i32)
+DEF_HELPER_3(add_usaturate, i32, env, i32, i32)
+DEF_HELPER_3(sub_usaturate, i32, env, i32, i32)
+DEF_HELPER_2(double_saturate, i32, env, s32)
DEF_HELPER_2(sdiv, s32, s32, s32)
DEF_HELPER_2(udiv, i32, i32, i32)
DEF_HELPER_1(rbit, i32, i32)
PAS_OP(uh)
#undef PAS_OP
-DEF_HELPER_2(ssat, i32, i32, i32)
-DEF_HELPER_2(usat, i32, i32, i32)
-DEF_HELPER_2(ssat16, i32, i32, i32)
-DEF_HELPER_2(usat16, i32, i32, i32)
+DEF_HELPER_3(ssat, i32, env, i32, i32)
+DEF_HELPER_3(usat, i32, env, i32, i32)
+DEF_HELPER_3(ssat16, i32, env, i32, i32)
+DEF_HELPER_3(usat16, i32, env, i32, i32)
DEF_HELPER_2(usad8, i32, i32, i32)
DEF_HELPER_1(wfi, void, env)
DEF_HELPER_3(cpsr_write, void, env, i32, i32)
-DEF_HELPER_0(cpsr_read, i32)
+DEF_HELPER_1(cpsr_read, i32, env)
DEF_HELPER_3(v7m_msr, void, env, i32, i32)
DEF_HELPER_2(v7m_mrs, i32, env, i32)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
-DEF_HELPER_1(get_user_reg, i32, i32)
+DEF_HELPER_2(get_user_reg, i32, env, i32)
DEF_HELPER_3(set_user_reg, void, env, i32, i32)
DEF_HELPER_1(vfp_get_fpscr, i32, env)
DEF_HELPER_2(rsqrte_f32, f32, f32, env)
DEF_HELPER_2(recpe_u32, i32, i32, env)
DEF_HELPER_2(rsqrte_u32, i32, i32, env)
-DEF_HELPER_4(neon_tbl, i32, i32, i32, i32, i32)
-
-DEF_HELPER_2(add_cc, i32, i32, i32)
-DEF_HELPER_2(adc_cc, i32, i32, i32)
-DEF_HELPER_2(sub_cc, i32, i32, i32)
-DEF_HELPER_2(sbc_cc, i32, i32, i32)
-
-DEF_HELPER_2(shl, i32, i32, i32)
-DEF_HELPER_2(shr, i32, i32, i32)
-DEF_HELPER_2(sar, i32, i32, i32)
-DEF_HELPER_2(shl_cc, i32, i32, i32)
-DEF_HELPER_2(shr_cc, i32, i32, i32)
-DEF_HELPER_2(sar_cc, i32, i32, i32)
-DEF_HELPER_2(ror_cc, i32, i32, i32)
+DEF_HELPER_5(neon_tbl, i32, env, i32, i32, i32, i32)
+
+DEF_HELPER_3(add_cc, i32, env, i32, i32)
+DEF_HELPER_3(adc_cc, i32, env, i32, i32)
+DEF_HELPER_3(sub_cc, i32, env, i32, i32)
+DEF_HELPER_3(sbc_cc, i32, env, i32, i32)
+
+DEF_HELPER_3(shl, i32, env, i32, i32)
+DEF_HELPER_3(shr, i32, env, i32, i32)
+DEF_HELPER_3(sar, i32, env, i32, i32)
+DEF_HELPER_3(shl_cc, i32, env, i32, i32)
+DEF_HELPER_3(shr_cc, i32, env, i32, i32)
+DEF_HELPER_3(sar_cc, i32, env, i32, i32)
+DEF_HELPER_3(ror_cc, i32, env, i32, i32)
/* neon_helper.c */
DEF_HELPER_3(neon_qadd_u8, i32, env, i32, i32)
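The DEF_HELPER_N macros above encode the argument count in N, so threading env through as a new first argument bumps each N by one. As a rough sketch of the convention (the exact expansion lives in def-helper.h), a declaration such as

    DEF_HELPER_3(add_saturate, i32, env, i32, i32)

yields a prototype along the lines of

    uint32_t helper_add_saturate(CPUARMState *env, uint32_t a, uint32_t b);

plus the matching gen_helper_add_saturate() wrapper that translate.c now calls with cpu_env.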
cpu_loop_exit(env);
}
-uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def,
+uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
uint32_t rn, uint32_t maxindex)
{
uint32_t val;
/* FIXME: Pass an explicit pointer to QF to CPUARMState, and move saturating
instructions into helper.c */
-uint32_t HELPER(add_setq)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
return res;
}
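The hunk context above cuts off the interesting part of add_setq's body: the reason it now takes env explicitly is that it sets the sticky saturation flag there. A minimal sketch of the whole helper, assuming the usual env->QF field and the SIGNBIT mask defined earlier in op_helper.c:

    uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
    {
        uint32_t res = a + b;
        if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
            env->QF = 1;  /* signed overflow: make the Q flag sticky */
        }
        return res;
    }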
-uint32_t HELPER(add_saturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
return res;
}
-uint32_t HELPER(sub_saturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
return res;
}
-uint32_t HELPER(double_saturate)(int32_t val)
+uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
uint32_t res;
if (val >= 0x40000000) {
return res;
}
-uint32_t HELPER(add_usaturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
if (res < a) {
return res;
}
-uint32_t HELPER(sub_usaturate)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a - b;
if (res > a) {
}
/* Signed saturation. */
-static inline uint32_t do_ssat(int32_t val, int shift)
+static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
int32_t top;
uint32_t mask;
}
/* Unsigned saturation. */
-static inline uint32_t do_usat(int32_t val, int shift)
+static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
uint32_t max;
}
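Likewise, do_ssat and do_usat only touch env to record saturation in the Q flag, which is what forces the extra parameter. A minimal sketch of the signed case, again assuming env->QF:

    static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
    {
        int32_t top = val >> shift;
        uint32_t mask = (1u << shift) - 1;
        if (top > 0) {
            env->QF = 1;      /* clamped to the positive limit */
            return mask;
        } else if (top < -1) {
            env->QF = 1;      /* clamped to the negative limit */
            return ~mask;
        }
        return val;
    }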
/* Signed saturate. */
-uint32_t HELPER(ssat)(uint32_t x, uint32_t shift)
+uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
- return do_ssat(x, shift);
+ return do_ssat(env, x, shift);
}
/* Dual halfword signed saturate. */
-uint32_t HELPER(ssat16)(uint32_t x, uint32_t shift)
+uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
uint32_t res;
- res = (uint16_t)do_ssat((int16_t)x, shift);
- res |= do_ssat(((int32_t)x) >> 16, shift) << 16;
+ res = (uint16_t)do_ssat(env, (int16_t)x, shift);
+ res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
return res;
}
/* Unsigned saturate. */
-uint32_t HELPER(usat)(uint32_t x, uint32_t shift)
+uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
- return do_usat(x, shift);
+ return do_usat(env, x, shift);
}
/* Dual halfword unsigned saturate. */
-uint32_t HELPER(usat16)(uint32_t x, uint32_t shift)
+uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
uint32_t res;
- res = (uint16_t)do_usat((int16_t)x, shift);
- res |= do_usat(((int32_t)x) >> 16, shift) << 16;
+ res = (uint16_t)do_usat(env, (int16_t)x, shift);
+ res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
return res;
}
cpu_loop_exit(env);
}
-uint32_t HELPER(cpsr_read)(void)
+uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
return cpsr_read(env) & ~CPSR_EXEC;
}
}
/* Access to user mode registers from privileged modes. */
-uint32_t HELPER(get_user_reg)(uint32_t regno)
+uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
uint32_t val;
The only way to do that in TCG is a conditional branch, which clobbers
all our temporaries. For now implement these as helper functions. */
-uint32_t HELPER (add_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(add_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
result = a + b;
return result;
}
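As with the saturating helpers, the flag updates sit in the trimmed context: add_cc and its siblings write the NZCV fields of CPUARMState directly, hence the explicit env. Roughly:

    uint32_t HELPER(add_cc)(CPUARMState *env, uint32_t a, uint32_t b)
    {
        uint32_t result = a + b;
        env->NF = env->ZF = result;             /* N and Z derived from the result */
        env->CF = result < a;                   /* unsigned carry out */
        env->VF = (a ^ b ^ -1) & (a ^ result);  /* signed overflow in bit 31 */
        return result;
    }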
-uint32_t HELPER(adc_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(adc_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
if (!env->CF) {
return result;
}
-uint32_t HELPER(sub_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(sub_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
result = a - b;
return result;
}
-uint32_t HELPER(sbc_cc)(uint32_t a, uint32_t b)
+uint32_t HELPER(sbc_cc)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t result;
if (!env->CF) {
/* Similarly for variable shift instructions. */
-uint32_t HELPER(shl)(uint32_t x, uint32_t i)
+uint32_t HELPER(shl)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32)
return x << shift;
}
-uint32_t HELPER(shr)(uint32_t x, uint32_t i)
+uint32_t HELPER(shr)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32)
return (uint32_t)x >> shift;
}
-uint32_t HELPER(sar)(uint32_t x, uint32_t i)
+uint32_t HELPER(sar)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32)
return (int32_t)x >> shift;
}
-uint32_t HELPER(shl_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(shr_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(sar_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift = i & 0xff;
if (shift >= 32) {
return x;
}
-uint32_t HELPER(ror_cc)(uint32_t x, uint32_t i)
+uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
int shift1, shift;
shift1 = i & 0xff;
{
if (flags) {
switch (shiftop) {
- case 0: gen_helper_shl_cc(var, var, shift); break;
- case 1: gen_helper_shr_cc(var, var, shift); break;
- case 2: gen_helper_sar_cc(var, var, shift); break;
- case 3: gen_helper_ror_cc(var, var, shift); break;
+ case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
+ case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
}
} else {
switch (shiftop) {
- case 0: gen_helper_shl(var, var, shift); break;
- case 1: gen_helper_shr(var, var, shift); break;
- case 2: gen_helper_sar(var, var, shift); break;
+ case 0: gen_helper_shl(var, cpu_env, var, shift); break;
+ case 1: gen_helper_shr(var, cpu_env, var, shift); break;
+ case 2: gen_helper_sar(var, cpu_env, var, shift); break;
case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
tcg_gen_rotr_i32(var, var, shift); break;
}
tmp2 = neon_load_reg(rm, 0);
tmp4 = tcg_const_i32(rn);
tmp5 = tcg_const_i32(n);
- gen_helper_neon_tbl(tmp2, tmp2, tmp, tmp4, tmp5);
+ gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
tcg_temp_free_i32(tmp);
if (insn & (1 << 6)) {
tmp = neon_load_reg(rd, 1);
tcg_gen_movi_i32(tmp, 0);
}
tmp3 = neon_load_reg(rm, 1);
- gen_helper_neon_tbl(tmp3, tmp3, tmp, tmp4, tmp5);
+ gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
tcg_temp_free_i32(tmp5);
tcg_temp_free_i32(tmp4);
neon_store_reg(rd, 0, tmp2);
tmp = load_cpu_field(spsr);
} else {
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
}
store_reg(s, rd, tmp);
}
tmp = load_reg(s, rm);
tmp2 = load_reg(s, rn);
if (op1 & 2)
- gen_helper_double_saturate(tmp2, tmp2);
+ gen_helper_double_saturate(tmp2, cpu_env, tmp2);
if (op1 & 1)
- gen_helper_sub_saturate(tmp, tmp, tmp2);
+ gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_add_saturate(tmp, tmp, tmp2);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
tcg_temp_free_i64(tmp64);
if ((sh & 2) == 0) {
tmp2 = load_reg(s, rn);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
} else {
if (op1 == 0) {
tmp2 = load_reg(s, rn);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rd, tmp);
if (IS_USER(s)) {
goto illegal_op;
}
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
gen_exception_return(s, tmp);
} else {
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
} else {
tcg_gen_sub_i32(tmp, tmp, tmp2);
}
break;
case 0x03:
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp2, tmp);
+ gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
} else {
tcg_gen_sub_i32(tmp, tmp2, tmp);
}
break;
case 0x04:
if (set_cc) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
} else {
tcg_gen_add_i32(tmp, tmp, tmp2);
}
break;
case 0x05:
if (set_cc) {
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_add_carry(tmp, tmp, tmp2);
}
break;
case 0x06:
if (set_cc) {
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
} else {
gen_sub_carry(tmp, tmp, tmp2);
}
break;
case 0x07:
if (set_cc) {
- gen_helper_sbc_cc(tmp, tmp2, tmp);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
} else {
gen_sub_carry(tmp, tmp2, tmp);
}
break;
case 0x0a:
if (set_cc) {
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp);
break;
case 0x0b:
if (set_cc) {
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp);
break;
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
- gen_helper_usat(tmp, tmp, tmp2);
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat(tmp, tmp, tmp2);
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00300fe0) == 0x00200f20) {
sh = (insn >> 16) & 0x1f;
tmp2 = tcg_const_i32(sh);
if (insn & (1 << 22))
- gen_helper_usat16(tmp, tmp, tmp2);
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat16(tmp, tmp, tmp2);
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
} else if ((insn & 0x00700fe0) == 0x00000fa0) {
* however it may overflow considered as a signed
* operation, in which case we must set the Q flag.
*/
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
if (insn & (1 << 22)) {
if (rd != 15)
{
tmp2 = load_reg(s, rd);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
store_reg(s, rn, tmp);
} else if (user) {
tmp = tcg_temp_new_i32();
tmp2 = tcg_const_i32(i);
- gen_helper_get_user_reg(tmp, tmp2);
+ gen_helper_get_user_reg(tmp, cpu_env, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, i);
break;
case 8: /* add */
if (conds)
- gen_helper_add_cc(t0, t0, t1);
+ gen_helper_add_cc(t0, cpu_env, t0, t1);
else
tcg_gen_add_i32(t0, t0, t1);
break;
case 10: /* adc */
if (conds)
- gen_helper_adc_cc(t0, t0, t1);
+ gen_helper_adc_cc(t0, cpu_env, t0, t1);
else
gen_adc(t0, t1);
break;
case 11: /* sbc */
if (conds)
- gen_helper_sbc_cc(t0, t0, t1);
+ gen_helper_sbc_cc(t0, cpu_env, t0, t1);
else
gen_sub_carry(t0, t0, t1);
break;
case 13: /* sub */
if (conds)
- gen_helper_sub_cc(t0, t0, t1);
+ gen_helper_sub_cc(t0, cpu_env, t0, t1);
else
tcg_gen_sub_i32(t0, t0, t1);
break;
case 14: /* rsb */
if (conds)
- gen_helper_sub_cc(t0, t1, t0);
+ gen_helper_sub_cc(t0, cpu_env, t1, t0);
else
tcg_gen_sub_i32(t0, t1, t0);
break;
gen_st32(tmp, addr, 0);
tcg_gen_addi_i32(addr, addr, 4);
tmp = tcg_temp_new_i32();
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
gen_st32(tmp, addr, 0);
if (insn & (1 << 21)) {
if ((insn & (1 << 24)) == 0) {
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
if (op & 1)
- gen_helper_double_saturate(tmp, tmp);
+ gen_helper_double_saturate(tmp, cpu_env, tmp);
if (op & 2)
- gen_helper_sub_saturate(tmp, tmp2, tmp);
+ gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
else
- gen_helper_add_saturate(tmp, tmp, tmp2);
+ gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
} else {
tmp = load_reg(s, rn);
tcg_temp_free_i32(tmp2);
if (rs != 15) {
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
* however it may overflow considered as a signed
* operation, in which case we must set the Q flag.
*/
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
if (rs != 15)
{
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
if (rs != 15)
{
tmp2 = load_reg(s, rs);
- gen_helper_add_setq(tmp, tmp, tmp2);
+ gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
}
break;
gen_helper_v7m_mrs(tmp, cpu_env, addr);
tcg_temp_free_i32(addr);
} else {
- gen_helper_cpsr_read(tmp);
+ gen_helper_cpsr_read(tmp, cpu_env);
}
store_reg(s, rd, tmp);
break;
if (op & 4) {
/* Unsigned. */
if ((op & 1) && shift == 0)
- gen_helper_usat16(tmp, tmp, tmp2);
+ gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_usat(tmp, tmp, tmp2);
+ gen_helper_usat(tmp, cpu_env, tmp, tmp2);
} else {
/* Signed. */
if ((op & 1) && shift == 0)
- gen_helper_ssat16(tmp, tmp, tmp2);
+ gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
else
- gen_helper_ssat(tmp, tmp, tmp2);
+ gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
break;
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
} else {
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
}
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
tcg_gen_movi_i32(tmp2, insn & 0xff);
switch (op) {
case 1: /* cmp */
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(tmp2);
break;
if (s->condexec_mask)
tcg_gen_add_i32(tmp, tmp, tmp2);
else
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
store_reg(s, rd, tmp);
break;
case 1: /* cmp */
tmp = load_reg(s, rd);
tmp2 = load_reg(s, rm);
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
tcg_temp_free_i32(tmp2);
tcg_temp_free_i32(tmp);
break;
break;
case 0x2: /* lsl */
if (s->condexec_mask) {
- gen_helper_shl(tmp2, tmp2, tmp);
+ gen_helper_shl(tmp2, cpu_env, tmp2, tmp);
} else {
- gen_helper_shl_cc(tmp2, tmp2, tmp);
+ gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
case 0x3: /* lsr */
if (s->condexec_mask) {
- gen_helper_shr(tmp2, tmp2, tmp);
+ gen_helper_shr(tmp2, cpu_env, tmp2, tmp);
} else {
- gen_helper_shr_cc(tmp2, tmp2, tmp);
+ gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
case 0x4: /* asr */
if (s->condexec_mask) {
- gen_helper_sar(tmp2, tmp2, tmp);
+ gen_helper_sar(tmp2, cpu_env, tmp2, tmp);
} else {
- gen_helper_sar_cc(tmp2, tmp2, tmp);
+ gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
if (s->condexec_mask)
gen_adc(tmp, tmp2);
else
- gen_helper_adc_cc(tmp, tmp, tmp2);
+ gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0x6: /* sbc */
if (s->condexec_mask)
gen_sub_carry(tmp, tmp, tmp2);
else
- gen_helper_sbc_cc(tmp, tmp, tmp2);
+ gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0x7: /* ror */
if (s->condexec_mask) {
tcg_gen_andi_i32(tmp, tmp, 0x1f);
tcg_gen_rotr_i32(tmp2, tmp2, tmp);
} else {
- gen_helper_ror_cc(tmp2, tmp2, tmp);
+ gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
gen_logic_CC(tmp2);
}
break;
if (s->condexec_mask)
tcg_gen_neg_i32(tmp, tmp2);
else
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
break;
case 0xa: /* cmp */
- gen_helper_sub_cc(tmp, tmp, tmp2);
+ gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
rd = 16;
break;
case 0xb: /* cmn */
- gen_helper_add_cc(tmp, tmp, tmp2);
+ gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
rd = 16;
break;
case 0xc: /* orr */