#if defined(TARGET_I386)
/* if a hardware interrupt is pending, we execute it */
if ((interrupt_request & CPU_INTERRUPT_HARD) &&
- (env->eflags & IF_MASK)) {
+ (env->eflags & IF_MASK) &&
+ !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
intno = cpu_x86_get_pic_interrupt(env);
if (loglevel) {
#endif
}
#endif
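/* Illustrative sketch, not part of the patch: the acceptance test above,
   folded into a predicate. A hardware interrupt is delivered only when
   one is pending, IF is set, and no interrupt shadow (the HF_INHIBIT_IRQ
   flag introduced below) is active. */
static inline int x86_irq_deliverable(CPUX86State *env, int interrupt_request)
{
    return (interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK) &&
           !(env->hflags & HF_INHIBIT_IRQ_MASK);
}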
- /* we compute the CPU state. We assume it will not
- change during the whole generated block. */
+ /* we record a subset of the CPU state. It will
+ always be the same before a given translated block
+ is executed. */
#if defined(TARGET_I386)
flags = (env->segs[R_CS].flags & DESC_B_MASK)
- >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
+ >> (DESC_B_SHIFT - HF_CS32_SHIFT);
flags |= (env->segs[R_SS].flags & DESC_B_MASK)
- >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
+ >> (DESC_B_SHIFT - HF_SS32_SHIFT);
flags |= (((unsigned long)env->segs[R_DS].base |
(unsigned long)env->segs[R_ES].base |
(unsigned long)env->segs[R_SS].base) != 0) <<
- GEN_FLAG_ADDSEG_SHIFT;
- flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
- flags |= env->soft_mmu << GEN_FLAG_SOFT_MMU_SHIFT;
- flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
- flags |= (env->eflags & (IOPL_MASK | TF_MASK));
+ HF_ADDSEG_SHIFT;
+ flags |= env->hflags;
+ flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
cs_base = env->segs[R_CS].base;
pc = cs_base + env->eip;
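/* A minimal sketch of the bit-relocation idiom used above: a flag bit at
   position `src' can be moved to position `dst' (src >= dst) with a mask
   and a single shift; this is what the DESC_B_MASK expressions compute
   inline. The helper name is illustrative, not from the patch. */
static inline unsigned long move_bit(unsigned long word, int src, int dst)
{
    /* extract the bit at position src, then shift it down to dst */
    return (word & (1ul << src)) >> (src - dst);
}
/* e.g. move_bit(env->segs[R_CS].flags, DESC_B_SHIFT, HF_CS32_SHIFT)
   places the CS default-size bit at the HF_CS32 position. */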
#elif defined(TARGET_ARM)
/* reset the soft MMU for the next block (it can currently
   only be set by a memory fault) */
#if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
- if (env->soft_mmu) {
- env->soft_mmu = 0;
+ if (env->hflags & HF_SOFTMMU_MASK) {
+ env->hflags &= ~HF_SOFTMMU_MASK;
/* do not allow linking to another block */
T0 = 0;
}
raise_exception_err(EXCP0E_PAGE, env->error_code);
} else {
/* activate soft MMU for this block */
- env->soft_mmu = 1;
+ env->hflags |= HF_SOFTMMU_MASK;
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit();
}
#define CC_S 0x0080
#define CC_O 0x0800
+#define TF_SHIFT 8
+#define IOPL_SHIFT 12
+#define VM_SHIFT 17
+
#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
#define DF_MASK 0x00000400
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000
+/* hidden flags - used internally by qemu to represent additional cpu
+   states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
+   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease
+   OR'ing with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT 0
+/* true if soft mmu is being used */
+#define HF_SOFTMMU_SHIFT 2
+/* true if hardware interrupts must be disabled for the next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 bit segments */
+#define HF_CS32_SHIFT 4
+#define HF_SS32_SHIFT 5
+/* zero base for DS, ES and SS */
+#define HF_ADDSEG_SHIFT 6
+
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
+#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
+#define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT)
+
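/* Sketch of a compile-time guard for the invariant stated above: the
   hidden-flag bits must not collide with the TF/IOPL/VM positions that
   get OR'ed in from eflags when the translation-block flags are built.
   Not part of the patch. */
#if (HF_CPL_MASK | HF_SOFTMMU_MASK | HF_INHIBIT_IRQ_MASK | \
     HF_CS32_MASK | HF_SS32_MASK | HF_ADDSEG_MASK) & \
    (TF_MASK | IOPL_MASK | VM_MASK)
#error hidden flag positions overlap the eflags bits copied into tb->flags
#endif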
#define CR0_PE_MASK (1 << 0)
#define CR0_TS_MASK (1 << 3)
#define CR0_WP_MASK (1 << 16)
uint32_t cc_dst;
uint32_t cc_op;
int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+ uint32_t hflags; /* hidden flags, see HF_xxx constants */
/* FPU state */
unsigned int fpstt; /* top of stack index */
SegmentCache tr;
SegmentCache gdt; /* only base and limit are used */
SegmentCache idt; /* only base and limit are used */
- int cpl; /* current cpl */
- int soft_mmu; /* TRUE if soft mmu is being used */
/* sysenter registers */
uint32_t sysenter_cs;
/* wrapper, just in case memory mappings must be changed */
static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
{
- s->cpl = cpl;
+#if HF_CPL_MASK == 3
+ s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
+#else
+#error HF_CPL_MASK is hardcoded
+#endif
}
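/* Companion accessor, a sketch not in the patch: because HF_CPL_SHIFT
   is 0, the current privilege level can be read back without a shift,
   which is why the code below uses `env->hflags & HF_CPL_MASK' directly. */
static inline int cpu_x86_get_cpl(CPUX86State *s)
{
    return s->hflags & HF_CPL_MASK;
}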
/* simulate fsave/frstor */
#if defined(TARGET_I386)
-#define GEN_FLAG_CODE32_SHIFT 0
-#define GEN_FLAG_ADDSEG_SHIFT 1
-#define GEN_FLAG_SS32_SHIFT 2
-#define GEN_FLAG_VM_SHIFT 3
-#define GEN_FLAG_ST_SHIFT 4
-#define GEN_FLAG_TF_SHIFT 8 /* same position as eflags */
-#define GEN_FLAG_CPL_SHIFT 9
-#define GEN_FLAG_SOFT_MMU_SHIFT 11
-#define GEN_FLAG_IOPL_SHIFT 12 /* same position as eflags */
-
void optimize_flags_init(void);
#endif
{
SegmentCache *dt;
uint8_t *ptr, *ssp;
- int type, dpl, selector, ss_dpl;
+ int type, dpl, selector, ss_dpl, cpl;
int has_error_code, new_stack, shift;
uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
uint32_t old_cs, old_ss, old_esp, old_eip;
break;
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
/* check privilege if software int */
- if (is_int && dpl < env->cpl)
+ if (is_int && dpl < cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (dpl > env->cpl)
+ if (dpl > cpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
get_ss_esp_from_tss(&ss, &esp, dpl);
if ((ss & 0xfffc) == 0)
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
new_stack = 1;
- } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
+ } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
/* to same privilege */
new_stack = 0;
} else {
{
SegmentCache *dt;
uint8_t *ptr;
- int dpl;
+ int dpl, cpl;
uint32_t e2;
dt = &env->idt;
e2 = ldl(ptr + 4);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
/* check privilege if software int */
- if (is_int && dpl < env->cpl)
+ if (is_int && dpl < cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
/* Since we emulate only user space, we cannot do more than
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->cpl;
+ cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->cpl;
+ cpl = env->hflags & HF_CPL_MASK;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->cpl;
+ cpl = env->hflags & HF_CPL_MASK;
rpl = new_cs & 3;
if (rpl < cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
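/* Sketch of the relation enforced above for a protected-mode far
   return: the requested RPL may not be more privileged (numerically
   smaller) than the current CPL. Helper name is illustrative only. */
static inline int return_rpl_ok(int rpl, int cpl)
{
    return rpl >= cpl; /* returns may only stay at or go to outer rings */
}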
tlb_flush(env);
#ifdef CONFIG_SOFTMMU
- env->soft_mmu = 1;
+ env->hflags |= HF_SOFTMMU_MASK;
#endif
/* init various static tables */
if (!inited) {
int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
unsigned long pd;
- cpl = env->cpl;
+ cpl = env->hflags & HF_CPL_MASK;
is_user = (cpl == 3);
#ifdef DEBUG_MMU
}
do_mapping:
- if (env->soft_mmu) {
+ if (env->hflags & HF_SOFTMMU_MASK) {
unsigned long paddr, vaddr, address, addend, page_offset;
int index;
if ((pd & 0xfff) != 0) {
/* IO access: no mapping is done as it will be handled by the
soft MMU */
- if (!env->soft_mmu)
+ if (!(env->hflags & HF_SOFTMMU_MASK))
ret = 2;
} else {
void *map_addr;
env->eflags |= IF_MASK;
}
+void OPPROTO op_set_inhibit_irq(void)
+{
+ env->hflags |= HF_INHIBIT_IRQ_MASK;
+}
+
+void OPPROTO op_reset_inhibit_irq(void)
+{
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+}
+
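/* How the two ops above are used by the translator (a sketch assembled
   from the changes later in this patch; `gen_sti_insn' is an
   illustrative name, not a real function): */
static void gen_sti_insn(DisasContext *s)
{
    gen_op_sti();             /* set IF in eflags */
    gen_op_set_inhibit_irq(); /* start the one-insn interrupt shadow */
    s->is_jmp = 2;            /* end the block so pending irqs get a
                                 chance after the next instruction */
}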
#if 0
/* vm86plus instructions */
void OPPROTO op_cli_vm(void)
/* test if there is a match for unaligned or IO access */
/* XXX: could be done more efficiently in the memory macros in a non-portable way */
- is_user = (env->cpl == 3);
+ is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_read[is_user][index].address;
int is_user, index, shift;
unsigned long physaddr, tlb_addr, addr1, addr2;
- is_user = (env->cpl == 3);
+ is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_read[is_user][index].address;
void *retaddr;
int is_user, index;
- is_user = (env->cpl == 3);
+ is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_write[is_user][index].address;
unsigned long physaddr, tlb_addr;
int is_user, index, i;
- is_user = (env->cpl == 3);
+ is_user = ((env->hflags & HF_CPL_MASK) == 3);
index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
redo:
tlb_addr = env->tlb_write[is_user][index].address;
else
gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
/* abort translation because the register may have a non zero base
- or because ss32 may change */
+ or because ss32 may change. For R_SS, translation must always
+ stop, as special handling is needed to disable hardware
+ interrupts for the next instruction */
if (seg_reg == R_SS || (!s->addseg && seg_reg < R_FS))
s->is_jmp = 2;
}
case 0x07: /* pop es */
case 0x17: /* pop ss */
case 0x1f: /* pop ds */
+ reg = b >> 3;
gen_pop_T0(s);
- gen_movl_seg_T0(s, b >> 3, pc_start - s->cs_base);
+ gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
gen_pop_update(s);
- /* XXX: if reg == SS, inhibit interrupts/trace */
+ if (reg == R_SS) {
+ /* if reg == SS, inhibit interrupts/trace */
+ gen_op_set_inhibit_irq();
+ }
break;
case 0x1a1: /* pop fs */
case 0x1a9: /* pop gs */
goto illegal_op;
gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
- /* XXX: if reg == SS, inhibit interrupts/trace */
+ if (reg == R_SS) {
+ /* if reg == SS, inhibit interrupts/trace */
+ gen_op_set_inhibit_irq();
+ }
break;
case 0x8c: /* mov Gv, seg */
modrm = ldub(s->pc++);
if (!s->vm86) {
if (s->cpl <= s->iopl) {
gen_op_sti();
+ /* interrupts are enabled only after the insn following sti */
+ gen_op_set_inhibit_irq();
s->is_jmp = 2; /* give a chance to handle pending irqs */
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
if (s->iopl == 3) {
gen_op_sti();
+ /* interrupts are enabled only after the insn following sti */
+ gen_op_set_inhibit_irq();
s->is_jmp = 2; /* give a chance to handle pending irqs */
} else {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
}
}
- /* XXX: interruptions are enabled only the first insn after sti */
break;
case 0x62: /* bound */
ot = dflag ? OT_LONG : OT_WORD;
flags = tb->flags;
dc->pe = env->cr[0] & CR0_PE_MASK;
- dc->code32 = (flags >> GEN_FLAG_CODE32_SHIFT) & 1;
- dc->ss32 = (flags >> GEN_FLAG_SS32_SHIFT) & 1;
- dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1;
- dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7;
- dc->vm86 = (flags >> GEN_FLAG_VM_SHIFT) & 1;
- dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
- dc->iopl = (flags >> GEN_FLAG_IOPL_SHIFT) & 3;
- dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
+ dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+ dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+ dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+ dc->f_st = 0;
+ dc->vm86 = (flags >> VM_SHIFT) & 1;
+ dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+ dc->iopl = (flags >> IOPL_SHIFT) & 3;
+ dc->tf = (flags >> TF_SHIFT) & 1;
dc->cc_op = CC_OP_DYNAMIC;
dc->cs_base = cs_base;
dc->tb = tb;
dc->popl_esp_hack = 0;
/* select memory access functions */
dc->mem_index = 0;
- if ((flags >> GEN_FLAG_SOFT_MMU_SHIFT) & 1) {
+ if (flags & HF_SOFTMMU_MASK) {
if (dc->cpl == 3)
dc->mem_index = 6;
else
dc->is_jmp = DISAS_NEXT;
pc_ptr = pc_start;
lj = -1;
+
+ /* if irqs were inhibited for the next instruction, we can clear
+ the inhibit flag here, as it is simpler (otherwise jumps would
+ have to be handled as a special case) */
+ if (flags & HF_INHIBIT_IRQ_MASK) {
+ gen_op_reset_inhibit_irq();
+ }
do {
if (env->nb_breakpoints > 0) {
for(j = 0; j < env->nb_breakpoints; j++) {