enum {
SAR = 3,
SCOMPARE1 = 12,
+ PS = 230,
};
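+/*
+ * PS (processor state) register fields: INTLEVEL is the current interrupt
+ * level, EXCM the exception mode flag, UM the user vector mode flag, RING
+ * the privilege ring (MMU option only), OWB the old window base, CALLINC
+ * the call increment and WOE the window overflow enable flag.
+ */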
+#define PS_INTLEVEL 0xf
+#define PS_INTLEVEL_SHIFT 0
+
+#define PS_EXCM 0x10
+#define PS_UM 0x20
+
+#define PS_RING 0xc0
+#define PS_RING_SHIFT 6
+
+#define PS_OWB 0xf00
+#define PS_OWB_SHIFT 8
+
+#define PS_CALLINC 0x30000
+#define PS_CALLINC_SHIFT 16
+#define PS_CALLINC_LEN 2
+
+#define PS_WOE 0x40000
+
typedef struct XtensaConfig {
const char *name;
uint64_t options;
return (config->options & XTENSA_OPTION_BIT(opt)) != 0;
}
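+/* Privilege ring taken from PS.RING; without the MMU option the core
+ * always runs in ring 0. */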
+static inline int xtensa_get_ring(const CPUState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
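+/* Effective ("current") ring: while PS.EXCM is set, memory is accessed
+ * with ring 0 rights regardless of PS.RING. */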
+static inline int xtensa_get_cring(const CPUState *env)
+{
+ if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) &&
+ (env->sregs[PS] & PS_EXCM) == 0) {
+ return (env->sregs[PS] & PS_RING) >> PS_RING_SHIFT;
+ } else {
+ return 0;
+ }
+}
+
+/* MMU mode definitions: one mode per privilege ring */
+#define MMU_MODE0_SUFFIX _ring0
+#define MMU_MODE1_SUFFIX _ring1
+#define MMU_MODE2_SUFFIX _ring2
+#define MMU_MODE3_SUFFIX _ring3
+
static inline int cpu_mmu_index(CPUState *env)
{
- return 0;
+ return xtensa_get_cring(env);
}
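+/* TB flags: bits 0-1 carry the translation ring, bit 2 mirrors PS.EXCM. */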
+#define XTENSA_TBFLAG_RING_MASK 0x3
+#define XTENSA_TBFLAG_EXCM 0x4
+
static inline void cpu_get_tb_cpu_state(CPUState *env, target_ulong *pc,
target_ulong *cs_base, int *flags)
{
*pc = env->pc;
*cs_base = 0;
*flags = 0;
+ *flags |= xtensa_get_ring(env);
+ if (env->sregs[PS] & PS_EXCM) {
+ *flags |= XTENSA_TBFLAG_EXCM;
+ }
}
#include "cpu-all.h"
TranslationBlock *tb;
uint32_t pc;
uint32_t next_pc;
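+ /* cring is the memory index used for qemu_ld/st ops: the current ring,
+ forced to 0 while PS.EXCM is set; ring is the ring from the TB flags. */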
+ int cring;
+ int ring;
int is_jmp;
int singlestep_enabled;
static const char * const sregnames[256] = {
[SAR] = "SAR",
[SCOMPARE1] = "SCOMPARE1",
+ [PS] = "PS",
};
static const char * const uregnames[256] = {
dc->sar_m32_5bit = false;
}
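+/* WSR.PS only keeps the supported PS fields; PS.RING is writable only
+ * when the MMU option is configured. */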
+static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
+{
+ uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
+ PS_UM | PS_EXCM | PS_INTLEVEL;
+
+ if (option_enabled(dc, XTENSA_OPTION_MMU)) {
+ mask |= PS_RING;
+ }
+ tcg_gen_andi_i32(cpu_SR[sr], v, mask);
+ /* This can change the MMU index, so exit the TB. */
+ gen_jumpi(dc, dc->next_pc, -1);
+}
+
static void gen_wsr(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
static void (* const wsr_handler[256])(DisasContext *dc,
uint32_t sr, TCGv_i32 v) = {
[SAR] = gen_wsr_sar,
+ [PS] = gen_wsr_ps,
};
if (sregnames[sr]) {
/* no ext L32R */
- tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, 0);
+ tcg_gen_qemu_ld32u(cpu_R[RRR_T], tmp, dc->cring);
tcg_temp_free(tmp);
}
break;
#define gen_load_store(type, shift) do { \
TCGv_i32 addr = tcg_temp_new_i32(); \
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << shift); \
- tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, 0); \
+ tcg_gen_qemu_##type(cpu_R[RRI8_T], addr, dc->cring); \
tcg_temp_free(addr); \
} while (0)
tcg_gen_mov_i32(tmp, cpu_R[RRI8_T]);
tcg_gen_addi_i32(addr, cpu_R[RRI8_S], RRI8_IMM8 << 2);
- tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, 0);
+ tcg_gen_qemu_ld32u(cpu_R[RRI8_T], addr, dc->cring);
tcg_gen_brcond_i32(TCG_COND_NE, cpu_R[RRI8_T],
cpu_SR[SCOMPARE1], label);
- tcg_gen_qemu_st32(tmp, addr, 0);
+ tcg_gen_qemu_st32(tmp, addr, dc->cring);
gen_set_label(label);
tcg_temp_free(addr);
#define gen_narrow_load_store(type) do { \
TCGv_i32 addr = tcg_temp_new_i32(); \
tcg_gen_addi_i32(addr, cpu_R[RRRN_S], RRRN_R << 2); \
- tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, 0); \
+ tcg_gen_qemu_##type(cpu_R[RRRN_T], addr, dc->cring); \
tcg_temp_free(addr); \
} while (0)
dc.singlestep_enabled = env->singlestep_enabled;
dc.tb = tb;
dc.pc = pc_start;
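+ /* Recover the ring from the TB flags; cring drops to 0 while PS.EXCM
+ is set so translated loads/stores use ring 0 access rights. */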
+ dc.ring = tb->flags & XTENSA_TBFLAG_RING_MASK;
+ dc.cring = (tb->flags & XTENSA_TBFLAG_EXCM) ? 0 : dc.ring;
dc.is_jmp = DISAS_NEXT;
init_sar_tracker(&dc);