#define HF_GIF_SHIFT 20 /* if set CPU takes interrupts */
#define HF_HIF_SHIFT 21 /* shadow copy of IF_MASK when in SVM */
#define HF_NMI_SHIFT 22 /* CPU serving NMI */
-#define HF_SVME_SHIFT 23 /* SVME enabled (copy of EFER.SVME */
+#define HF_SVME_SHIFT 23 /* SVME enabled (copy of EFER.SVME) */
#define HF_SVMI_SHIFT 24 /* SVM intercepts are active */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
return EXCP_HALTED;
}
+/* load efer and update the corresponding hflags. XXX: do consistency
+ checks with cpuid bits ? */
+static inline void cpu_load_efer(CPUState *env, uint64_t val)
+{
+ env->efer = val;
+ /* refresh the HF_LMA/HF_SVME shadow copies of the EFER.LMA and
+ EFER.SVME bits so they always track env->efer */
+ env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
+ if (env->efer & MSR_EFER_LMA)
+ env->hflags |= HF_LMA_MASK;
+ if (env->efer & MSR_EFER_SVME)
+ env->hflags |= HF_SVME_MASK;
+}
/* init SMM cpu state */
#ifdef TARGET_X86_64
- env->efer = 0;
- env->hflags &= ~HF_LMA_MASK;
+ cpu_load_efer(env, 0);
+ /* cpu_load_efer() also clears the HF_LMA/HF_SVME hflags shadows */
#endif
load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
env->eip = 0x00008000;
sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
- env->efer = ldq_phys(sm_state + 0x7ed0);
- if (env->efer & MSR_EFER_LMA)
- env->hflags |= HF_LMA_MASK;
- else
- env->hflags &= ~HF_LMA_MASK;
+ cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
+ /* cpu_load_efer() refreshes HF_LMA_MASK (and HF_SVME_MASK) from EFER */
for(i = 0; i < 6; i++) {
offset = 0x7e00 + i * 16;
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
- env->efer = (env->efer & ~update_mask) |
- (val & update_mask);
+ if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
+ update_mask |= MSR_EFER_SVME;
+ /* only EFER bits backed by CPUID features are writable; writes to
+ other bits are silently dropped via update_mask */
+ cpu_load_efer(env, (env->efer & ~update_mask) |
+ (val & update_mask));
}
break;
case MSR_STAR:
}
#ifdef TARGET_X86_64
- env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
- env->hflags &= ~HF_LMA_MASK;
- if (env->efer & MSR_EFER_LMA)
- env->hflags |= HF_LMA_MASK;
+ cpu_load_efer(env,
+ ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+ /* guest EFER comes from the VMCB save area; hflags shadows updated too */
#endif
env->eflags = 0;
load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
cpu_set_apic_tpr(env, env->cr[8]);
}
- /* we need to set the efer after the crs so the hidden flags get set properly */
+ /* we need to set the efer after the crs so the hidden flags get
+ set properly */
#ifdef TARGET_X86_64
- env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
- env->hflags &= ~HF_LMA_MASK;
- if (env->efer & MSR_EFER_LMA)
- env->hflags |= HF_LMA_MASK;
- /* XXX: should also emulate the VM_CR MSR */
- env->hflags &= ~HF_SVME_MASK;
- if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
- if (env->efer & MSR_EFER_SVME)
- env->hflags |= HF_SVME_MASK;
- } else {
- env->efer &= ~MSR_EFER_SVME;
- }
+ cpu_load_efer(env,
+ ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
+ /* NOTE(review): the removed code cleared EFER.SVME here when CPUID
+ lacks SVM; that gating now lives only in the wrmsr update_mask path.
+ The host-save EFER is restored unmodified -- confirm it can never
+ carry SVME on a CPU without CPUID_EXT3_SVM. */
#endif
env->eflags = 0;