uint32_t exception;
/* Routine used to access memory */
int mem_idx;
+ int access_type;
/* Translation flags */
-#if !defined(CONFIG_USER_ONLY)
- int supervisor;
-#endif
+ int le_mode;
#if defined(TARGET_PPC64)
int sf_mode;
#endif
#endif
}
-static always_inline void gen_set_access_type(int access_type)
+static always_inline void gen_set_access_type (DisasContext *ctx, int access_type)
{
- tcg_gen_movi_i32(cpu_access_type, access_type);
+ if (ctx->access_type != access_type) {
+ tcg_gen_movi_i32(cpu_access_type, access_type);
+ ctx->access_type = access_type;
+ }
}
static always_inline void gen_update_nip (DisasContext *ctx, target_ulong nip)
break;
#if !defined(CONFIG_USER_ONLY)
case 31:
- if (ctx->supervisor > 0) {
+ if (ctx->mem_idx > 0) {
/* Set process priority to very low */
prio = 1;
}
break;
case 5:
- if (ctx->supervisor > 0) {
+ if (ctx->mem_idx > 0) {
/* Set process priority to medium-hight */
prio = 5;
}
break;
case 3:
- if (ctx->supervisor > 0) {
+ if (ctx->mem_idx > 0) {
/* Set process priority to high */
prio = 6;
}
break;
case 7:
- if (ctx->supervisor > 1) {
+ if (ctx->mem_idx > 1) {
/* Set process priority to very high */
prio = 7;
}
/*** Addressing modes ***/
/* Register indirect with immediate index : EA = (rA|0) + SIMM */
-static always_inline void gen_addr_imm_index (TCGv EA,
- DisasContext *ctx,
- target_long maskl)
+static always_inline void gen_addr_imm_index (DisasContext *ctx, TCGv EA, target_long maskl)
{
target_long simm = SIMM(ctx->opcode);
simm &= ~maskl;
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_movi_tl(EA, (uint32_t)simm);
+ } else
+#endif
tcg_gen_movi_tl(EA, simm);
- else if (likely(simm != 0))
+ } else if (likely(simm != 0)) {
tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], simm);
- else
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ } else {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else
+#endif
tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
}
-static always_inline void gen_addr_reg_index (TCGv EA,
- DisasContext *ctx)
+static always_inline void gen_addr_reg_index (DisasContext *ctx, TCGv EA)
{
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rB(ctx->opcode)]);
+ } else
+#endif
tcg_gen_mov_tl(EA, cpu_gpr[rB(ctx->opcode)]);
- else
+ } else {
tcg_gen_add_tl(EA, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ }
}
-static always_inline void gen_addr_register (TCGv EA,
- DisasContext *ctx)
+static always_inline void gen_addr_register (DisasContext *ctx, TCGv EA)
{
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
tcg_gen_movi_tl(EA, 0);
- else
- tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else {
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ } else
+#endif
+ tcg_gen_mov_tl(EA, cpu_gpr[rA(ctx->opcode)]);
+ }
+}
+
+static always_inline void gen_addr_add (DisasContext *ctx, TCGv ret, TCGv arg1, target_long val)
+{
+ tcg_gen_addi_tl(ret, arg1, val);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(ret, ret);
+ }
+#endif
}
static always_inline void gen_check_align (DisasContext *ctx, TCGv EA, int mask)
}
/*** Integer load ***/
+static always_inline void gen_qemu_ld8u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld8u(arg1, arg2, ctx->mem_idx);
+}
+
+static always_inline void gen_qemu_ld8s(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld8s(arg1, arg2, ctx->mem_idx);
+}
+
+static always_inline void gen_qemu_ld16u(DisasContext *ctx, TCGv arg1, TCGv arg2)
+{
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
#if defined(TARGET_PPC64)
-#define GEN_QEMU_LD_PPC64(width) \
-static always_inline void gen_qemu_ld##width##_ppc64(TCGv t0, TCGv t1, int flags)\
-{ \
- if (likely(flags & 2)) \
- tcg_gen_qemu_ld##width(t0, t1, flags >> 2); \
- else { \
- TCGv addr = tcg_temp_new(); \
- tcg_gen_ext32u_tl(addr, t1); \
- tcg_gen_qemu_ld##width(t0, addr, flags >> 2); \
- tcg_temp_free(addr); \
- } \
-}
-GEN_QEMU_LD_PPC64(8u)
-GEN_QEMU_LD_PPC64(8s)
-GEN_QEMU_LD_PPC64(16u)
-GEN_QEMU_LD_PPC64(16s)
-GEN_QEMU_LD_PPC64(32u)
-GEN_QEMU_LD_PPC64(32s)
-GEN_QEMU_LD_PPC64(64)
-
-#define GEN_QEMU_ST_PPC64(width) \
-static always_inline void gen_qemu_st##width##_ppc64(TCGv t0, TCGv t1, int flags)\
-{ \
- if (likely(flags & 2)) \
- tcg_gen_qemu_st##width(t0, t1, flags >> 2); \
- else { \
- TCGv addr = tcg_temp_new(); \
- tcg_gen_ext32u_tl(addr, t1); \
- tcg_gen_qemu_st##width(t0, addr, flags >> 2); \
- tcg_temp_free(addr); \
- } \
-}
-GEN_QEMU_ST_PPC64(8)
-GEN_QEMU_ST_PPC64(16)
-GEN_QEMU_ST_PPC64(32)
-GEN_QEMU_ST_PPC64(64)
-
-static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8u_ppc64(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_ld8s(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8s_ppc64(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_ld16u(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i32 t0;
- gen_qemu_ld16u_ppc64(arg0, arg1, flags);
- t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap16_i32(t0, t0);
- tcg_gen_extu_i32_tl(arg0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
tcg_temp_free_i32(t0);
- } else
- gen_qemu_ld16u_ppc64(arg0, arg1, flags);
+#else
+ tcg_gen_bswap16_i32(arg1, arg1);
+#endif
+ }
}
-static always_inline void gen_qemu_ld16s(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_ld16s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
TCGv_i32 t0;
- gen_qemu_ld16u_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap16_i32(t0, t0);
- tcg_gen_extu_i32_tl(arg0, t0);
- tcg_gen_ext16s_tl(arg0, arg0);
+ tcg_gen_extu_i32_tl(arg1, t0);
+ tcg_gen_ext16s_tl(arg1, arg1);
tcg_temp_free_i32(t0);
- } else
- gen_qemu_ld16s_ppc64(arg0, arg1, flags);
+#else
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ tcg_gen_bswap16_i32(arg1, arg1);
+ tcg_gen_ext16s_i32(arg1, arg1);
+#endif
+ } else {
+ tcg_gen_qemu_ld16s(arg1, arg2, ctx->mem_idx);
+ }
}
-static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_ld32u(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
- TCGv_i32 t0;
- gen_qemu_ld32u_ppc64(arg0, arg1, flags);
- t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_bswap_i32(t0, t0);
- tcg_gen_extu_i32_tl(arg0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
tcg_temp_free_i32(t0);
- } else
- gen_qemu_ld32u_ppc64(arg0, arg1, flags);
+#else
+ tcg_gen_bswap_i32(arg1, arg1);
+#endif
+ }
}
-static always_inline void gen_qemu_ld32s(TCGv arg0, TCGv arg1, int flags)
+#if defined(TARGET_PPC64)
+static always_inline void gen_qemu_ld32s(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
-    if (unlikely(flags & 1)) {
+    /* Byte-swap is decided by the translation endianness flag (le_mode),
+     * NOT by mem_idx (the privilege-level MMU index) — matches every
+     * other gen_qemu_ld*/st* helper in this patch. */
+    if (unlikely(ctx->le_mode)) {
        TCGv_i32 t0;
-        gen_qemu_ld32u_ppc64(arg0, arg1, flags);
+        tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
        t0 = tcg_temp_new_i32();
-        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_trunc_tl_i32(t0, arg1);
        tcg_gen_bswap_i32(t0, t0);
-        tcg_gen_ext_i32_tl(arg0, t0);
+        tcg_gen_ext_i32_tl(arg1, t0);
        tcg_temp_free_i32(t0);
    } else
-        gen_qemu_ld32s_ppc64(arg0, arg1, flags);
+        tcg_gen_qemu_ld32s(arg1, arg2, ctx->mem_idx);
}
+#endif
-static always_inline void gen_qemu_ld64(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_ld64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
- gen_qemu_ld64_ppc64(arg0, arg1, flags);
- if (unlikely(flags & 1))
- tcg_gen_bswap_i64(arg0, arg0);
+ tcg_gen_qemu_ld64(arg1, arg2, ctx->mem_idx);
+ if (unlikely(ctx->le_mode)) {
+ tcg_gen_bswap_i64(arg1, arg1);
+ }
}
-static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st8(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- gen_qemu_st8_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_st8(arg1, arg2, ctx->mem_idx);
}
-static always_inline void gen_qemu_st16(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st16(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
TCGv_i32 t0;
- TCGv_i64 t1;
+ TCGv t1;
t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, arg0);
+ tcg_gen_trunc_tl_i32(t0, arg1);
tcg_gen_ext16u_i32(t0, t0);
tcg_gen_bswap16_i32(t0, t0);
- t1 = tcg_temp_new_i64();
+ t1 = tcg_temp_new();
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
- gen_qemu_st16_ppc64(t1, arg1, flags);
- tcg_temp_free_i64(t1);
- } else
- gen_qemu_st16_ppc64(arg0, arg1, flags);
+ tcg_gen_qemu_st16(t1, arg2, ctx->mem_idx);
+ tcg_temp_free(t1);
+#else
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext16u_tl(t0, arg1);
+ tcg_gen_bswap16_i32(t0, t0);
+ tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+#endif
+ } else {
+ tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
+ }
}
-static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st32(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
-    if (unlikely(flags & 1)) {
+    if (unlikely(ctx->le_mode)) {
+#if defined(TARGET_PPC64)
        TCGv_i32 t0;
-        TCGv_i64 t1;
+        TCGv t1;
        t0 = tcg_temp_new_i32();
-        tcg_gen_trunc_tl_i32(t0, arg0);
+        tcg_gen_trunc_tl_i32(t0, arg1);
        tcg_gen_bswap_i32(t0, t0);
-        t1 = tcg_temp_new_i64();
+        t1 = tcg_temp_new();
        tcg_gen_extu_i32_tl(t1, t0);
        tcg_temp_free_i32(t0);
-        gen_qemu_st32_ppc64(t1, arg1, flags);
-        tcg_temp_free_i64(t1);
-    } else
-        gen_qemu_st32_ppc64(arg0, arg1, flags);
+        tcg_gen_qemu_st32(t1, arg2, ctx->mem_idx);
+        tcg_temp_free(t1);
+#else
+        /* TCGv temp paired with tcg_temp_new()/tcg_temp_free()
+         * (not tcg_temp_new_i32), matching gen_qemu_st16 above. */
+        TCGv t0 = tcg_temp_new();
+        tcg_gen_bswap_i32(t0, arg1);
+        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
+        tcg_temp_free(t0);
+#endif
+    } else {
+        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
+    }
}
-static always_inline void gen_qemu_st64(TCGv arg0, TCGv arg1, int flags)
+static always_inline void gen_qemu_st64(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
- if (unlikely(flags & 1)) {
+ if (unlikely(ctx->le_mode)) {
TCGv_i64 t0 = tcg_temp_new_i64();
- tcg_gen_bswap_i64(t0, arg0);
- gen_qemu_st64_ppc64(t0, arg1, flags);
+ tcg_gen_bswap_i64(t0, arg1);
+ tcg_gen_qemu_st64(t0, arg2, ctx->mem_idx);
tcg_temp_free_i64(t0);
} else
- gen_qemu_st64_ppc64(arg0, arg1, flags);
-}
-
-
-#else /* defined(TARGET_PPC64) */
-#define GEN_QEMU_LD_PPC32(width) \
-static always_inline void gen_qemu_ld##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
-{ \
- tcg_gen_qemu_ld##width(arg0, arg1, flags >> 1); \
-}
-GEN_QEMU_LD_PPC32(8u)
-GEN_QEMU_LD_PPC32(8s)
-GEN_QEMU_LD_PPC32(16u)
-GEN_QEMU_LD_PPC32(16s)
-GEN_QEMU_LD_PPC32(32u)
-GEN_QEMU_LD_PPC32(32s)
-static always_inline void gen_qemu_ld64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- tcg_gen_qemu_ld64(arg0, arg1, flags >> 1);
-}
-
-#define GEN_QEMU_ST_PPC32(width) \
-static always_inline void gen_qemu_st##width##_ppc32(TCGv arg0, TCGv arg1, int flags) \
-{ \
- tcg_gen_qemu_st##width(arg0, arg1, flags >> 1); \
-}
-GEN_QEMU_ST_PPC32(8)
-GEN_QEMU_ST_PPC32(16)
-GEN_QEMU_ST_PPC32(32)
-static always_inline void gen_qemu_st64_ppc32(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- tcg_gen_qemu_st64(arg0, arg1, flags >> 1);
-}
-
-static always_inline void gen_qemu_ld8u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8u_ppc32(arg0, arg1, flags >> 1);
-}
-
-static always_inline void gen_qemu_ld8s(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld8s_ppc32(arg0, arg1, flags >> 1);
-}
-
-static always_inline void gen_qemu_ld16u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld16u_ppc32(arg0, arg1, flags >> 1);
- if (unlikely(flags & 1))
- tcg_gen_bswap16_i32(arg0, arg0);
-}
-
-static always_inline void gen_qemu_ld16s(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- gen_qemu_ld16u_ppc32(arg0, arg1, flags);
- tcg_gen_bswap16_i32(arg0, arg0);
- tcg_gen_ext16s_i32(arg0, arg0);
- } else
- gen_qemu_ld16s_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_ld32u(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld32u_ppc32(arg0, arg1, flags);
- if (unlikely(flags & 1))
- tcg_gen_bswap_i32(arg0, arg0);
-}
-
-static always_inline void gen_qemu_ld64(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- gen_qemu_ld64_ppc32(arg0, arg1, flags);
- if (unlikely(flags & 1))
- tcg_gen_bswap_i64(arg0, arg0);
-}
-
-static always_inline void gen_qemu_st8(TCGv arg0, TCGv arg1, int flags)
-{
- gen_qemu_st8_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_st16(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_ext16u_i32(temp, arg0);
- tcg_gen_bswap16_i32(temp, temp);
- gen_qemu_st16_ppc32(temp, arg1, flags);
- tcg_temp_free_i32(temp);
- } else
- gen_qemu_st16_ppc32(arg0, arg1, flags);
-}
-
-static always_inline void gen_qemu_st32(TCGv arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i32 temp = tcg_temp_new_i32();
- tcg_gen_bswap_i32(temp, arg0);
- gen_qemu_st32_ppc32(temp, arg1, flags);
- tcg_temp_free_i32(temp);
- } else
- gen_qemu_st32_ppc32(arg0, arg1, flags);
+ tcg_gen_qemu_st64(arg1, arg2, ctx->mem_idx);
}
-static always_inline void gen_qemu_st64(TCGv_i64 arg0, TCGv arg1, int flags)
-{
- if (unlikely(flags & 1)) {
- TCGv_i64 temp = tcg_temp_new_i64();
- tcg_gen_bswap_i64(temp, arg0);
- gen_qemu_st64_ppc32(temp, arg1, flags);
- tcg_temp_free_i64(temp);
- } else
- gen_qemu_st64_ppc32(arg0, arg1, flags);
-}
-#endif
-
#define GEN_LD(name, ldop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
if (type == PPC_64B) \
- gen_addr_imm_index(EA, ctx, 0x03); \
+ gen_addr_imm_index(ctx, EA, 0x03); \
else \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_LDX(name, ldop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_gpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
return;
}
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x03);
+ gen_addr_imm_index(ctx, EA, 0x03);
if (ctx->opcode & 0x02) {
/* lwa (lwau is undefined) */
- gen_qemu_ld32s(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);
+ gen_qemu_ld32s(ctx, cpu_gpr[rD(ctx->opcode)], EA);
} else {
/* ld - ldu */
- gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], EA, ctx->mem_idx);
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], EA);
}
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
TCGv EA;
/* Restore CPU state */
- if (unlikely(ctx->supervisor == 0)) {
+ if (unlikely(ctx->mem_idx == 0)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_INVAL(ctx);
return;
}
- if (unlikely(ctx->mem_idx & 1)) {
+ if (unlikely(ctx->le_mode)) {
/* Little-endian mode is not handled */
GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
return;
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x0F);
- gen_qemu_ld64(cpu_gpr[rd], EA, ctx->mem_idx);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_ld64(cpu_gpr[rd+1], EA, ctx->mem_idx);
+ gen_addr_imm_index(ctx, EA, 0x0F);
+ gen_qemu_ld64(ctx, cpu_gpr[rd], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_ld64(ctx, cpu_gpr[rd+1], EA);
tcg_temp_free(EA);
#endif
}
#define GEN_ST(name, stop, opc, type) \
GEN_HANDLER(name, opc, 0xFF, 0xFF, 0x00000000, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
if (type == PPC_64B) \
- gen_addr_imm_index(EA, ctx, 0x03); \
+ gen_addr_imm_index(ctx, EA, 0x03); \
else \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
#define GEN_STX(name, stop, opc2, opc3, type) \
GEN_HANDLER(name##x, 0x1F, opc2, opc3, 0x00000001, type) \
{ \
- TCGv EA = tcg_temp_new(); \
- gen_set_access_type(ACCESS_INT); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_gpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ TCGv EA; \
+ gen_set_access_type(ctx, ACCESS_INT); \
+ EA = tcg_temp_new(); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_gpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_PRIVOPC(ctx);
#else
/* stq */
- if (unlikely(ctx->supervisor == 0)) {
+ if (unlikely(ctx->mem_idx == 0)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_INVAL(ctx);
return;
}
- if (unlikely(ctx->mem_idx & 1)) {
+ if (unlikely(ctx->le_mode)) {
/* Little-endian mode is not handled */
GEN_EXCP(ctx, POWERPC_EXCP_ALIGN, POWERPC_EXCP_ALIGN_LE);
return;
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x03);
- gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
- tcg_gen_addi_tl(EA, EA, 8);
- gen_qemu_st64(cpu_gpr[rs+1], EA, ctx->mem_idx);
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64(ctx, cpu_gpr[rs], EA);
+ gen_addr_add(ctx, EA, EA, 8);
+ gen_qemu_st64(ctx, cpu_gpr[rs+1], EA);
tcg_temp_free(EA);
#endif
} else {
return;
}
}
+ gen_set_access_type(ctx, ACCESS_INT);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_INT);
- gen_addr_imm_index(EA, ctx, 0x03);
- gen_qemu_st64(cpu_gpr[rs], EA, ctx->mem_idx);
+ gen_addr_imm_index(ctx, EA, 0x03);
+ gen_qemu_st64(ctx, cpu_gpr[rs], EA);
if (Rc(ctx->opcode))
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA);
tcg_temp_free(EA);
#endif
/*** Integer load and store with byte reverse ***/
/* lhbrx */
-static void always_inline gen_qemu_ld16ur(TCGv t0, TCGv t1, int flags)
+static void always_inline gen_qemu_ld16ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv_i32 temp = tcg_temp_new_i32();
- gen_qemu_ld16u(t0, t1, flags);
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_bswap16_i32(temp, temp);
- tcg_gen_extu_i32_tl(t0, temp);
- tcg_temp_free_i32(temp);
+ tcg_gen_qemu_ld16u(arg1, arg2, ctx->mem_idx);
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_bswap16_i32(t0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
+ tcg_temp_free_i32(t0);
+#else
+ tcg_gen_bswap16_i32(arg1, arg1);
+#endif
+ }
}
GEN_LDX(lhbr, ld16ur, 0x16, 0x18, PPC_INTEGER);
/* lwbrx */
-static void always_inline gen_qemu_ld32ur(TCGv t0, TCGv t1, int flags)
+static void always_inline gen_qemu_ld32ur(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv_i32 temp = tcg_temp_new_i32();
- gen_qemu_ld32u(t0, t1, flags);
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_bswap_i32(temp, temp);
- tcg_gen_extu_i32_tl(t0, temp);
- tcg_temp_free_i32(temp);
+ tcg_gen_qemu_ld32u(arg1, arg2, ctx->mem_idx);
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_bswap_i32(t0, t0);
+ tcg_gen_extu_i32_tl(arg1, t0);
+ tcg_temp_free_i32(t0);
+#else
+ tcg_gen_bswap_i32(arg1, arg1);
+#endif
+ }
}
GEN_LDX(lwbr, ld32ur, 0x16, 0x10, PPC_INTEGER);
/* sthbrx */
-static void always_inline gen_qemu_st16r(TCGv t0, TCGv t1, int flags)
+static void always_inline gen_qemu_st16r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
- TCGv_i32 temp = tcg_temp_new_i32();
- TCGv t2 = tcg_temp_new();
- tcg_gen_trunc_tl_i32(temp, t0);
- tcg_gen_ext16u_i32(temp, temp);
- tcg_gen_bswap16_i32(temp, temp);
- tcg_gen_extu_i32_tl(t2, temp);
- tcg_temp_free_i32(temp);
- gen_qemu_st16(t2, t1, flags);
- tcg_temp_free(t2);
+ if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+ TCGv_i32 t0;
+ TCGv t1;
+ t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, arg1);
+ tcg_gen_ext16u_i32(t0, t0);
+ tcg_gen_bswap16_i32(t0, t0);
+ t1 = tcg_temp_new();
+ tcg_gen_extu_i32_tl(t1, t0);
+ tcg_temp_free_i32(t0);
+ tcg_gen_qemu_st16(t1, arg2, ctx->mem_idx);
+ tcg_temp_free(t1);
+#else
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_ext16u_tl(t0, arg1);
+ tcg_gen_bswap16_i32(t0, t0);
+ tcg_gen_qemu_st16(t0, arg2, ctx->mem_idx);
+ tcg_temp_free(t0);
+#endif
+ } else {
+ tcg_gen_qemu_st16(arg1, arg2, ctx->mem_idx);
+ }
}
GEN_STX(sthbr, st16r, 0x16, 0x1C, PPC_INTEGER);
/* stwbrx */
-static void always_inline gen_qemu_st32r(TCGv t0, TCGv t1, int flags)
+static void always_inline gen_qemu_st32r(DisasContext *ctx, TCGv arg1, TCGv arg2)
{
-    TCGv_i32 temp = tcg_temp_new_i32();
-    TCGv t2 = tcg_temp_new();
-    tcg_gen_trunc_tl_i32(temp, t0);
-    tcg_gen_bswap_i32(temp, temp);
-    tcg_gen_extu_i32_tl(t2, temp);
-    tcg_temp_free_i32(temp);
-    gen_qemu_st32(t2, t1, flags);
-    tcg_temp_free(t2);
+    if (likely(!ctx->le_mode)) {
+#if defined(TARGET_PPC64)
+        TCGv_i32 t0;
+        TCGv t1;
+        t0 = tcg_temp_new_i32();
+        tcg_gen_trunc_tl_i32(t0, arg1);
+        tcg_gen_bswap_i32(t0, t0);
+        t1 = tcg_temp_new();
+        tcg_gen_extu_i32_tl(t1, t0);
+        tcg_temp_free_i32(t0);
+        tcg_gen_qemu_st32(t1, arg2, ctx->mem_idx);
+        tcg_temp_free(t1);
+#else
+        /* TCGv temp paired with tcg_temp_new()/tcg_temp_free()
+         * (not tcg_temp_new_i32), matching gen_qemu_st16r above. */
+        TCGv t0 = tcg_temp_new();
+        tcg_gen_bswap_i32(t0, arg1);
+        tcg_gen_qemu_st32(t0, arg2, ctx->mem_idx);
+        tcg_temp_free(t0);
+#endif
+    } else {
+        tcg_gen_qemu_st32(arg1, arg2, ctx->mem_idx);
+    }
}
GEN_STX(stwbr, st32r, 0x16, 0x14, PPC_INTEGER);
/* lmw */
GEN_HANDLER(lmw, 0x2E, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1;
+ gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(t0, ctx, 0);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
gen_helper_lmw(t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* stmw */
GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rS(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1;
+ gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_imm_index(t0, ctx, 0);
+ t0 = tcg_temp_new();
+ t1 = tcg_const_i32(rS(ctx->opcode));
+ gen_addr_imm_index(ctx, t0, 0);
gen_helper_stmw(t0, t1);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_LSWX);
return;
}
+ gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
- gen_addr_register(t0, ctx);
+ gen_addr_register(ctx, t0);
t1 = tcg_const_i32(nb);
t2 = tcg_const_i32(start);
gen_helper_lsw(t0, t1, t2);
/* lswx */
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
- TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
- TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1, t2, t3;
+ gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_const_i32(rD(ctx->opcode));
+ t2 = tcg_const_i32(rA(ctx->opcode));
+ t3 = tcg_const_i32(rB(ctx->opcode));
gen_helper_lswx(t0, t1, t2, t3);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* stswi */
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING)
{
+ TCGv t0;
+ TCGv_i32 t1, t2;
int nb = NB(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1;
- TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
+ gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_register(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_register(ctx, t0);
if (nb == 0)
nb = 32;
t1 = tcg_const_i32(nb);
+ t2 = tcg_const_i32(rS(ctx->opcode));
gen_helper_stsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* stswx */
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING)
{
- TCGv t0 = tcg_temp_new();
- TCGv_i32 t1 = tcg_temp_new_i32();
- TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
+ TCGv t0;
+ TCGv_i32 t1, t2;
+ gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ t1 = tcg_temp_new_i32();
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
+ t2 = tcg_const_i32(rS(ctx->opcode));
gen_helper_stsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
/* lwarx */
GEN_HANDLER(lwarx, 0x1F, 0x14, 0x00, 0x00000001, PPC_RES)
{
- TCGv t0 = tcg_temp_local_new();
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(t0, ctx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
gen_check_align(ctx, t0, 0x03);
-#if defined(TARGET_PPC64)
- if (!ctx->sf_mode)
- tcg_gen_ext32u_tl(t0, t0);
-#endif
- gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
tcg_gen_mov_tl(cpu_reserve, t0);
tcg_temp_free(t0);
}
/* stwcx. */
GEN_HANDLER2(stwcx_, "stwcx.", 0x1F, 0x16, 0x04, 0x00000000, PPC_RES)
{
- int l1 = gen_new_label();
- TCGv t0 = tcg_temp_local_new();
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(t0, ctx);
+ int l1;
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
gen_check_align(ctx, t0, 0x03);
-#if defined(TARGET_PPC64)
- if (!ctx->sf_mode)
- tcg_gen_ext32u_tl(t0, t0);
-#endif
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
- gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], t0, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], t0);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
tcg_temp_free(t0);
/* ldarx */
GEN_HANDLER(ldarx, 0x1F, 0x14, 0x02, 0x00000001, PPC_64B)
{
- TCGv t0 = tcg_temp_local_new();
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(t0, ctx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
gen_check_align(ctx, t0, 0x07);
- if (!ctx->sf_mode)
- tcg_gen_ext32u_tl(t0, t0);
- gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], t0);
tcg_gen_mov_tl(cpu_reserve, t0);
tcg_temp_free(t0);
}
/* stdcx. */
GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B)
{
- int l1 = gen_new_label();
- TCGv t0 = tcg_temp_local_new();
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(t0, ctx);
+ int l1;
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_RES);
+ t0 = tcg_temp_local_new();
+ gen_addr_reg_index(ctx, t0);
gen_check_align(ctx, t0, 0x07);
- if (!ctx->sf_mode)
- tcg_gen_ext32u_tl(t0, t0);
tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_xer);
tcg_gen_shri_i32(cpu_crf[0], cpu_crf[0], XER_SO);
tcg_gen_andi_i32(cpu_crf[0], cpu_crf[0], 1);
+ l1 = gen_new_label();
tcg_gen_brcond_tl(TCG_COND_NE, t0, cpu_reserve, l1);
tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], 1 << CRF_EQ);
- gen_qemu_st64(cpu_gpr[rS(ctx->opcode)], t0, ctx->mem_idx);
+ gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], t0);
gen_set_label(l1);
tcg_gen_movi_tl(cpu_reserve, -1);
tcg_temp_free(t0);
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##ldop(cpu_fpr[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##ldop(ctx, cpu_fpr[rD(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_LDUXF(name, ldop, op | 0x01, type); \
GEN_LDXF(name, ldop, 0x17, op | 0x00, type)
-static always_inline void gen_qemu_ld32fs(TCGv_i64 arg1, TCGv arg2, int flags)
+static always_inline void gen_qemu_ld32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new_i32();
- gen_qemu_ld32u(t0, arg2, flags);
+ gen_qemu_ld32u(ctx, t0, arg2);
tcg_gen_trunc_tl_i32(t1, t0);
tcg_temp_free(t0);
gen_helper_float32_to_float64(arg1, t1);
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_imm_index(EA, ctx, 0); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_imm_index(ctx, EA, 0); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_INVAL(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_gen_mov_tl(cpu_gpr[rA(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_EXCP_NO_FP(ctx); \
return; \
} \
- gen_set_access_type(ACCESS_FLOAT); \
+ gen_set_access_type(ctx, ACCESS_FLOAT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
- gen_qemu_##stop(cpu_fpr[rS(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_addr_reg_index(ctx, EA); \
+ gen_qemu_##stop(ctx, cpu_fpr[rS(ctx->opcode)], EA); \
tcg_temp_free(EA); \
}
GEN_STUXF(name, stop, op | 0x01, type); \
GEN_STXF(name, stop, 0x17, op | 0x00, type)
-static always_inline void gen_qemu_st32fs(TCGv_i64 arg1, TCGv arg2, int flags)
+static always_inline void gen_qemu_st32fs(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv_i32 t0 = tcg_temp_new_i32();
TCGv t1 = tcg_temp_new();
gen_helper_float64_to_float32(t0, arg1);
tcg_gen_extu_i32_tl(t1, t0);
tcg_temp_free_i32(t0);
- gen_qemu_st32(t1, arg2, flags);
+ gen_qemu_st32(ctx, t1, arg2);
tcg_temp_free(t1);
}
GEN_STFS(stfs, st32fs, 0x14, PPC_FLOAT);
/* Optional: */
-static always_inline void gen_qemu_st32fiw(TCGv_i64 arg1, TCGv arg2, int flags)
+static always_inline void gen_qemu_st32fiw(DisasContext *ctx, TCGv_i64 arg1, TCGv arg2)
{
TCGv t0 = tcg_temp_new();
tcg_gen_trunc_i64_tl(t0, arg1),
- gen_qemu_st32(t0, arg2, flags);
+ gen_qemu_st32(ctx, t0, arg2);
tcg_temp_free(t0);
}
/* stfiwx */
}
/*** System linkage ***/
/* rfi (supervisor only) */
GEN_HANDLER(rfi, 0x13, 0x12, 0x01, 0x03FF8001, PPC_FLOW)
{
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
/* Restore CPU state */
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_PRIVOPC(ctx);
#else
/* Restore CPU state */
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_PRIVOPC(ctx);
#else
/* Restore CPU state */
- if (unlikely(ctx->supervisor <= 1)) {
+ if (unlikely(ctx->mem_idx <= 1)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVREG(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
uint32_t sprn = SPR(ctx->opcode);
#if !defined(CONFIG_USER_ONLY)
- if (ctx->supervisor == 2)
+ if (ctx->mem_idx == 2)
read_cb = ctx->spr_cb[sprn].hea_read;
- else if (ctx->supervisor)
+ else if (ctx->mem_idx)
read_cb = ctx->spr_cb[sprn].oea_read;
else
#endif
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVREG(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVREG(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
uint32_t sprn = SPR(ctx->opcode);
#if !defined(CONFIG_USER_ONLY)
- if (ctx->supervisor == 2)
+ if (ctx->mem_idx == 2)
write_cb = ctx->spr_cb[sprn].hea_write;
- else if (ctx->supervisor)
+ else if (ctx->mem_idx)
write_cb = ctx->spr_cb[sprn].oea_write;
else
#endif
GEN_HANDLER(dcbf, 0x1F, 0x16, 0x02, 0x03C00001, PPC_CACHE)
{
/* XXX: specification says this is treated as a load by the MMU */
- TCGv t0 = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
tcg_temp_free(t0);
}
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv EA, val;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(EA, ctx);
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ gen_addr_reg_index(ctx, EA);
val = tcg_temp_new();
/* XXX: specification says this should be treated as a store by the MMU */
- gen_qemu_ld8u(val, EA, ctx->mem_idx);
- gen_qemu_st8(val, EA, ctx->mem_idx);
+ gen_qemu_ld8u(ctx, val, EA);
+ gen_qemu_st8(ctx, val, EA);
tcg_temp_free(val);
tcg_temp_free(EA);
#endif
GEN_HANDLER(dcbst, 0x1F, 0x16, 0x01, 0x03E00001, PPC_CACHE)
{
/* XXX: specification say this is treated as a load by the MMU */
- TCGv t0 = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld8u(t0, t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld8u(ctx, t0, t0);
tcg_temp_free(t0);
}
/* dcbz */
GEN_HANDLER(dcbz, 0x1F, 0x16, 0x1F, 0x03E00001, PPC_CACHE_DCBZ)
{
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
gen_helper_dcbz(t0);
tcg_temp_free(t0);
}
GEN_HANDLER2(dcbz_970, "dcbz", 0x1F, 0x16, 0x1F, 0x03C00001, PPC_CACHE_DCBZT)
{
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
if (ctx->opcode & 0x00200000)
gen_helper_dcbz(t0);
else
/* icbi */
GEN_HANDLER(icbi, 0x1F, 0x16, 0x1E, 0x03E00001, PPC_CACHE_ICBI)
{
- TCGv t0 = tcg_temp_new();
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_CACHE);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
- gen_addr_reg_index(t0, ctx);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
gen_helper_icbi(t0);
tcg_temp_free(t0);
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
#endif /* defined(TARGET_PPC64) */
/*** Lookaside buffer management ***/
-/* Optional & supervisor only: */
+/* Optional & supervisor only: */
/* tlbia */
GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA)
{
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
/* eciwx */
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN)
{
+ TCGv t0;
/* Should check EAR[E] ! */
- TCGv t0 = tcg_temp_new();
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(t0, ctx);
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
gen_check_align(ctx, t0, 0x03);
- gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
}
/* ecowx */
GEN_HANDLER(ecowx, 0x1F, 0x16, 0x09, 0x00000001, PPC_EXTERN)
{
+ TCGv t0;
/* Should check EAR[E] ! */
- TCGv t0 = tcg_temp_new();
- gen_set_access_type(ACCESS_RES);
- gen_addr_reg_index(t0, ctx);
+ gen_set_access_type(ctx, ACCESS_EXT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
gen_check_align(ctx, t0, 0x03);
- gen_qemu_st32(cpu_gpr[rD(ctx->opcode)], t0, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
}
TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
- gen_addr_reg_index(t0, ctx);
+ gen_addr_reg_index(ctx, t0);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_helper_lscbx(t0, t0, t1, t2, t3);
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ gen_addr_reg_index(ctx, t0);
tcg_gen_shri_tl(t0, t0, 28);
tcg_gen_andi_tl(t0, t0, 0xF);
gen_helper_load_sr(cpu_gpr[rd], t0);
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ gen_addr_reg_index(ctx, t0);
gen_helper_rac(cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#endif
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_HANDLER(lfq, 0x38, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ t1 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ TCGv t0, t1;
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* lfqx */
GEN_HANDLER(lfqx, 0x1F, 0x17, 0x18, 0x00000001, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_ld64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_ld64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_ld64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_ld64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
GEN_HANDLER(stfq, 0x3C, 0xFF, 0xFF, 0x00000003, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_imm_index(t0, ctx, 0);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_imm_index(ctx, t0, 0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* stfqux */
{
int ra = rA(ctx->opcode);
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- TCGv t1 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t1, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t1, ctx->mem_idx);
+ TCGv t0, t1;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ t1 = tcg_temp_new();
+ gen_addr_add(ctx, t1, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t1);
+ tcg_temp_free(t1);
if (ra != 0)
tcg_gen_mov_tl(cpu_gpr[ra], t0);
tcg_temp_free(t0);
- tcg_temp_free(t1);
}
/* stfqx */
GEN_HANDLER(stfqx, 0x1F, 0x17, 0x1C, 0x00000001, PPC_POWER2)
{
int rd = rD(ctx->opcode);
- TCGv t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
- gen_qemu_st64(cpu_fpr[rd], t0, ctx->mem_idx);
- tcg_gen_addi_tl(t0, t0, 8);
- gen_qemu_st64(cpu_fpr[(rd + 1) % 32], t0, ctx->mem_idx);
+ TCGv t0;
+ gen_set_access_type(ctx, ACCESS_FLOAT);
+ t0 = tcg_temp_new();
+ gen_addr_reg_index(ctx, t0);
+ gen_qemu_st64(ctx, cpu_fpr[rd], t0);
+ gen_addr_add(ctx, t0, t0, 8);
+ gen_qemu_st64(ctx, cpu_fpr[(rd + 1) % 32], t0);
tcg_temp_free(t0);
}
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
-#if defined(TARGET_PPC64)
- if (!ctx->sf_mode)
- tcg_gen_ext32u_tl(t0, t0);
-#endif
+ gen_addr_reg_index(ctx, t0);
gen_helper_tlbie(cpu_gpr[rB(ctx->opcode)]);
tcg_temp_free(t0);
#endif
GEN_EXCP_PRIVREG(ctx);
#else
TCGv dcrn;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
GEN_EXCP_PRIVREG(ctx);
#else
TCGv dcrn;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVREG(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVREG(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVREG(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv EA, val;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
+ gen_set_access_type(ctx, ACCESS_CACHE);
EA = tcg_temp_new();
- gen_set_access_type(ACCESS_CACHE);
- gen_addr_reg_index(EA, ctx);
+ gen_addr_reg_index(ctx, EA);
val = tcg_temp_new();
- gen_qemu_ld32u(val, EA, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, val, EA);
tcg_temp_free(val);
tcg_gen_mov_tl(cpu_gpr[rD(ctx->opcode)], EA);
tcg_temp_free(EA);
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#endif
}
-/* rfci (supervisor only) */
+/* rfci (supervisor only) */
GEN_HANDLER2(rfci_40x, "rfci", 0x13, 0x13, 0x01, 0x03FF8001, PPC_40x_EXCP)
{
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ gen_addr_reg_index(ctx, t0);
gen_helper_4xx_tlbsx(cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
if (Rc(ctx->opcode)) {
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
t0 = tcg_temp_new();
- gen_addr_reg_index(t0, ctx);
+ gen_addr_reg_index(ctx, t0);
gen_helper_440_tlbsx(cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
if (Rc(ctx->opcode)) {
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_PRIVOPC(ctx);
#else
TCGv t0;
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
#if defined(CONFIG_USER_ONLY)
GEN_EXCP_PRIVOPC(ctx);
#else
- if (unlikely(!ctx->supervisor)) {
+ if (unlikely(!ctx->mem_idx)) {
GEN_EXCP_PRIVOPC(ctx);
return;
}
GEN_EXCP_NO_VR(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
+ gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- if (ctx->mem_idx & 1) { \
- gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ if (ctx->le_mode) { \
+ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_ld64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_ld64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_ld64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_ld64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
GEN_EXCP_NO_VR(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
EA = tcg_temp_new(); \
- gen_addr_reg_index(EA, ctx); \
+ gen_addr_reg_index(ctx, EA); \
tcg_gen_andi_tl(EA, EA, ~0xf); \
- if (ctx->mem_idx & 1) { \
- gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ if (ctx->le_mode) { \
+ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
} else { \
- gen_qemu_st64(cpu_avrh[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_st64(ctx, cpu_avrh[rD(ctx->opcode)], EA); \
tcg_gen_addi_tl(EA, EA, 8); \
- gen_qemu_st64(cpu_avrl[rD(ctx->opcode)], EA, ctx->mem_idx); \
+ gen_qemu_st64(ctx, cpu_avrl[rD(ctx->opcode)], EA); \
} \
tcg_temp_free(EA); \
}
GEN_SPE(evcmpeq, speundef, 0x1A, 0x08, 0x00600000, PPC_SPE); ////
/* SPE load and stores */
-static always_inline void gen_addr_spe_imm_index (TCGv EA, DisasContext *ctx, int sh)
+static always_inline void gen_addr_spe_imm_index (DisasContext *ctx, TCGv EA, int sh)
{
target_ulong uimm = rB(ctx->opcode);
- if (rA(ctx->opcode) == 0)
+ if (rA(ctx->opcode) == 0) {
tcg_gen_movi_tl(EA, uimm << sh);
- else
+ } else {
tcg_gen_addi_tl(EA, cpu_gpr[rA(ctx->opcode)], uimm << sh);
+#if defined(TARGET_PPC64)
+ if (!ctx->sf_mode) {
+ tcg_gen_ext32u_tl(EA, EA);
+ }
+#endif
+ }
}
static always_inline void gen_op_evldd(DisasContext *ctx, TCGv addr)
{
#if defined(TARGET_PPC64)
- gen_qemu_ld64(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld64(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#else
TCGv_i64 t0 = tcg_temp_new_i64();
- gen_qemu_ld64(t0, addr, ctx->mem_idx);
+ gen_qemu_ld64(ctx, t0, addr);
tcg_gen_trunc_i64_i32(cpu_gpr[rD(ctx->opcode)], t0);
tcg_gen_shri_i64(t0, t0, 32);
tcg_gen_trunc_i64_i32(cpu_gprh[rD(ctx->opcode)], t0);
{
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
- gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
- tcg_gen_addi_tl(addr, addr, 4);
- gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#else
- gen_qemu_ld32u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 4);
- gen_qemu_ld32u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_ld32u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#endif
}
{
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#endif
tcg_temp_free(t0);
static always_inline void gen_op_evlhhesplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
tcg_gen_shli_tl(t0, t0, 16);
static always_inline void gen_op_evlhhousplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
static always_inline void gen_op_evlhhossplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16s(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
tcg_gen_ext32u_tl(t0, t0);
{
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
#endif
tcg_temp_free(t0);
{
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#else
- gen_qemu_ld16u(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#endif
}
{
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
- gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16s(ctx, t0, addr);
tcg_gen_ext32u_tl(cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16s(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, t0, addr);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_temp_free(t0);
#else
- gen_qemu_ld16s(cpu_gprh[rD(ctx->opcode)], addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16s(cpu_gpr[rD(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_ld16s(ctx, cpu_gprh[rD(ctx->opcode)], addr);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16s(ctx, cpu_gpr[rD(ctx->opcode)], addr);
#endif
}
static always_inline void gen_op_evlwwsplat(DisasContext *ctx, TCGv addr)
{
TCGv t0 = tcg_temp_new();
- gen_qemu_ld32u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld32u(ctx, t0, addr);
#if defined(TARGET_PPC64)
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
{
TCGv t0 = tcg_temp_new();
#if defined(TARGET_PPC64)
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 48);
tcg_gen_shli_tl(t0, t0, 32);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
tcg_gen_shli_tl(t0, t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rD(ctx->opcode)], t0);
#else
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gprh[rD(ctx->opcode)], t0, 16);
tcg_gen_or_tl(cpu_gprh[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_ld16u(t0, addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_ld16u(ctx, t0, addr);
tcg_gen_shli_tl(cpu_gpr[rD(ctx->opcode)], t0, 16);
tcg_gen_or_tl(cpu_gpr[rD(ctx->opcode)], cpu_gprh[rD(ctx->opcode)], t0);
#endif
static always_inline void gen_op_evstdd(DisasContext *ctx, TCGv addr)
{
#if defined(TARGET_PPC64)
- gen_qemu_st64(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st64(ctx, cpu_gpr[rS(ctx->opcode)], addr);
#else
TCGv_i64 t0 = tcg_temp_new_i64();
tcg_gen_concat_i32_i64(t0, cpu_gpr[rS(ctx->opcode)], cpu_gprh[rS(ctx->opcode)]);
- gen_qemu_st64(t0, addr, ctx->mem_idx);
+ gen_qemu_st64(ctx, t0, addr);
tcg_temp_free_i64(t0);
#endif
}
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st32(t0, addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, t0, addr);
tcg_temp_free(t0);
#else
- gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
- tcg_gen_addi_tl(addr, addr, 4);
- gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 4);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
static always_inline void gen_op_evstdh(DisasContext *ctx, TCGv addr)
#else
tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
#endif
- gen_qemu_st16(t0, addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
#if defined(TARGET_PPC64)
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
#else
- gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
- tcg_gen_addi_tl(addr, addr, 2);
+ gen_addr_add(ctx, addr, addr, 2);
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
tcg_temp_free(t0);
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
static always_inline void gen_op_evstwhe(DisasContext *ctx, TCGv addr)
#else
tcg_gen_shri_tl(t0, cpu_gprh[rS(ctx->opcode)], 16);
#endif
- gen_qemu_st16(t0, addr, ctx->mem_idx);
- tcg_gen_addi_tl(addr, addr, 2);
+ gen_qemu_st16(ctx, t0, addr);
+ gen_addr_add(ctx, addr, addr, 2);
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 16);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
tcg_temp_free(t0);
}
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st16(t0, addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, t0, addr);
tcg_temp_free(t0);
#else
- gen_qemu_st16(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st16(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
- tcg_gen_addi_tl(addr, addr, 2);
- gen_qemu_st16(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_addr_add(ctx, addr, addr, 2);
+ gen_qemu_st16(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
static always_inline void gen_op_evstwwe(DisasContext *ctx, TCGv addr)
#if defined(TARGET_PPC64)
TCGv t0 = tcg_temp_new();
tcg_gen_shri_tl(t0, cpu_gpr[rS(ctx->opcode)], 32);
- gen_qemu_st32(t0, addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, t0, addr);
tcg_temp_free(t0);
#else
- gen_qemu_st32(cpu_gprh[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gprh[rS(ctx->opcode)], addr);
#endif
}
static always_inline void gen_op_evstwwo(DisasContext *ctx, TCGv addr)
{
- gen_qemu_st32(cpu_gpr[rS(ctx->opcode)], addr, ctx->mem_idx);
+ gen_qemu_st32(ctx, cpu_gpr[rS(ctx->opcode)], addr);
}
#define GEN_SPEOP_LDST(name, opc2, sh) \
-GEN_HANDLER(gen_##name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE) \
+GEN_HANDLER(name, 0x04, opc2, 0x0C, 0x00000000, PPC_SPE) \
{ \
TCGv t0; \
if (unlikely(!ctx->spe_enabled)) { \
GEN_EXCP_NO_AP(ctx); \
return; \
} \
+ gen_set_access_type(ctx, ACCESS_INT); \
t0 = tcg_temp_new(); \
if (Rc(ctx->opcode)) { \
- gen_addr_spe_imm_index(t0, ctx, sh); \
+ gen_addr_spe_imm_index(ctx, t0, sh); \
} else { \
- gen_addr_reg_index(t0, ctx); \
+ gen_addr_reg_index(ctx, t0); \
} \
gen_op_##name(ctx, t0); \
tcg_temp_free(t0); \
opc_handler_t **table, *handler;
target_ulong pc_start;
uint16_t *gen_opc_end;
- int supervisor, little_endian;
CPUBreakpoint *bp;
int j, lj = -1;
int num_insns;
ctx.tb = tb;
ctx.exception = POWERPC_EXCP_NONE;
ctx.spr_cb = env->spr_cb;
- supervisor = env->mmu_idx;
-#if !defined(CONFIG_USER_ONLY)
- ctx.supervisor = supervisor;
-#endif
- little_endian = env->hflags & (1 << MSR_LE) ? 1 : 0;
+ ctx.mem_idx = env->mmu_idx;
+ ctx.access_type = -1;
+ ctx.le_mode = env->hflags & (1 << MSR_LE) ? 1 : 0;
#if defined(TARGET_PPC64)
ctx.sf_mode = msr_sf;
- ctx.mem_idx = (supervisor << 2) | (msr_sf << 1) | little_endian;
-#else
- ctx.mem_idx = (supervisor << 1) | little_endian;
#endif
ctx.fpu_enabled = msr_fp;
if ((env->flags & POWERPC_FLAG_SPE) && msr_spe)
if (loglevel & CPU_LOG_TB_IN_ASM) {
fprintf(logfile, "----------------\n");
fprintf(logfile, "nip=" ADDRX " super=%d ir=%d\n",
- ctx.nip, supervisor, (int)msr_ir);
+ ctx.nip, ctx.mem_idx, (int)msr_ir);
}
#endif
if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
gen_io_start();
- if (unlikely(little_endian)) {
+ if (unlikely(ctx.le_mode)) {
ctx.opcode = bswap32(ldl_code(ctx.nip));
} else {
ctx.opcode = ldl_code(ctx.nip);
if (loglevel & CPU_LOG_TB_IN_ASM) {
int flags;
flags = env->bfd_mach;
- flags |= little_endian << 16;
+ flags |= ctx.le_mode << 16;
fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
target_disas(logfile, pc_start, ctx.nip - pc_start, flags);
fprintf(logfile, "\n");