#define MSR_IA32_APICBASE_BSP (1<<8)
#define MSR_IA32_APICBASE_ENABLE (1<<11)
#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
-#define MSR_IA32_TSCDEADLINE 0x6e0
#define MSR_MTRRcap 0xfe
#define MSR_MTRRcap_VCNT 8
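
The define removed above is the architectural TSC-deadline timer MSR, index 0x6e0 (IA32_TSC_DEADLINE in the Intel SDM), which sits alongside the IA32_APIC_BASE bit definitions this hunk leaves untouched. As a reference for what those bit positions mean, here is a small hypothetical userspace reader built on Linux's msr driver; it is not part of the patch and assumes the msr module is loaded so that /dev/cpu/0/msr exists.

/* Hypothetical demo, not QEMU code: read IA32_APIC_BASE (MSR 0x1b)
 * via Linux's msr driver, where the file offset selects the MSR
 * index. It decodes the BSP (bit 8), global-enable (bit 11), and
 * base-address fields named by the defines above. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    uint64_t apicbase;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0 || pread(fd, &apicbase, sizeof(apicbase), 0x1b) != sizeof(apicbase)) {
        perror("rdmsr");
        return 1;
    }
    close(fd);
    printf("BSP=%llu enable=%llu base=%#llx\n",
           (unsigned long long)(apicbase >> 8) & 1,
           (unsigned long long)(apicbase >> 11) & 1,
           (unsigned long long)(apicbase & (0xfffffULL << 12)));
    return 0;
}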
uint64_t async_pf_en_msr;
uint64_t tsc;
- uint64_t tsc_deadline;
uint64_t mcg_status;
#define cpu_list_id x86_cpu_list
#define cpudef_setup x86_cpudef_setup
-#define CPU_SAVE_VERSION 13
+#define CPU_SAVE_VERSION 12
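
Reverting CPU_SAVE_VERSION from 13 back to 12 is what makes this a clean revert for live migration: the constant becomes the version_id of the "cpu" vmstate section, and the tsc_deadline field removed further down was tagged with version 13, so a stream produced by this build must no longer advertise that version. A minimal sketch of how the constant is consumed, assuming the usual target-i386/machine.c wiring of this era (abridged, not a verbatim copy):

/* Sketch (assumption: abridged; requires QEMU's vmstate headers). */
static const VMStateDescription vmstate_cpu = {
    .name = "cpu",
    .version_id = CPU_SAVE_VERSION,    /* 12 again after this patch */
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        /* ... register and MSR fields ... */
        VMSTATE_UINT64_V(xcr0, CPUState, 12),  /* sent only when version >= 12 */
        VMSTATE_END_OF_LIST()
    }
};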
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
static bool has_msr_star;
static bool has_msr_hsave_pa;
-static bool has_msr_tsc_deadline;
static bool has_msr_async_pf_en;
static int lm_capable_kernel;
has_msr_hsave_pa = true;
continue;
}
- if (kvm_msr_list->indices[i] == MSR_IA32_TSCDEADLINE) {
- has_msr_tsc_deadline = true;
- continue;
- }
}
}
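
The has_msr_tsc_deadline flag deleted above was filled in by this loop over the kernel's reply to KVM_GET_MSR_INDEX_LIST, exactly like has_msr_star and has_msr_hsave_pa, so the probe has to disappear together with the flag. For reference, a standalone sketch of that two-step ioctl protocol (hypothetical demo program, not QEMU code): the first call deliberately fails with E2BIG but reports how many MSR indices the kernel can list.

/* Hypothetical demo: enumerate the MSRs KVM can save/restore. */
#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0) {
        perror("/dev/kvm");
        return 1;
    }
    struct kvm_msr_list probe = { .nmsrs = 0 };
    ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);   /* fails with E2BIG, sets nmsrs */

    struct kvm_msr_list *list =
        calloc(1, sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
    list->nmsrs = probe.nmsrs;
    if (ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list) == 0) {
        for (unsigned i = 0; i < list->nmsrs; i++) {
            printf("MSR %#x is save/restore-capable\n", list->indices[i]);
        }
    }
    free(list);
    return 0;
}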
if (has_msr_hsave_pa) {
kvm_msr_entry_set(&msrs[n++], MSR_VM_HSAVE_PA, env->vm_hsave);
}
- if (has_msr_tsc_deadline) {
- kvm_msr_entry_set(&msrs[n++], MSR_IA32_TSCDEADLINE, env->tsc_deadline);
- }
#ifdef TARGET_X86_64
if (lm_capable_kernel) {
kvm_msr_entry_set(&msrs[n++], MSR_CSTAR, env->cstar);
if (has_msr_hsave_pa) {
msrs[n++].index = MSR_VM_HSAVE_PA;
}
- if (has_msr_tsc_deadline) {
- msrs[n++].index = MSR_IA32_TSCDEADLINE;
- }
if (!env->tsc_valid) {
msrs[n++].index = MSR_IA32_TSC;
case MSR_IA32_TSC:
env->tsc = msrs[i].data;
break;
- case MSR_IA32_TSCDEADLINE:
- env->tsc_deadline = msrs[i].data;
- break;
case MSR_VM_HSAVE_PA:
env->vm_hsave = msrs[i].data;
break;
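
These hunks cover the whole KVM round trip: kvm_put_msrs packs (index, value) pairs with kvm_msr_entry_set and issues one KVM_SET_MSRS ioctl, kvm_get_msrs stacks the indices it wants behind the same has_msr_* guards, and the switch above demultiplexes what KVM_GET_MSRS handed back. That is why MSR_IA32_TSCDEADLINE must vanish from all three places at once. A condensed sketch of the read side (assumption: simplified from the surrounding kvm.c, with env and kvm_vcpu_ioctl as in that file; error handling elided):

/* Sketch, not verbatim: one KVM_GET_MSRS round trip. */
struct {
    struct kvm_msrs info;
    struct kvm_msr_entry entries[100];
} msr_data;
int i, n = 0, ret;

msr_data.entries[n++].index = MSR_IA32_SYSENTER_CS;
if (!env->tsc_valid) {
    msr_data.entries[n++].index = MSR_IA32_TSC;
}
msr_data.info.nmsrs = n;

ret = kvm_vcpu_ioctl(env, KVM_GET_MSRS, &msr_data);
for (i = 0; i < ret; i++) {            /* ret == number of entries read */
    switch (msr_data.entries[i].index) {
    case MSR_IA32_TSC:
        env->tsc = msr_data.entries[i].data;
        break;
    }
}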
VMSTATE_UINT64_V(xcr0, CPUState, 12),
VMSTATE_UINT64_V(xstate_bv, CPUState, 12),
VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12),
- VMSTATE_UINT64_V(tsc_deadline, CPUState, 13),
VMSTATE_END_OF_LIST()
/* The above list is not sorted /wrt version numbers, watch out! */
},
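
For completeness: the trailing argument of VMSTATE_UINT64_V is the minimum section version at which the field exists on the wire, which is why the tsc_deadline entry and the CPU_SAVE_VERSION bump to 13 had to be introduced together and are reverted together. Abridged expansion, assuming QEMU's standard vmstate macros:

/* Assumption: abridged from QEMU's vmstate macro definitions. */
#define VMSTATE_UINT64_V(_f, _s, _v)                               \
    VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t)

/* A version-13 field is only serialized when the "cpu" section is at
 * least version 13; with CPU_SAVE_VERSION back at 12 it would never
 * travel, so the entry is dropped outright rather than left dead. */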