/* MSR used to read the per-partition time reference counter */
#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+/* A partition's reference time stamp counter (TSC) page */
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+
/* MSR used to retrieve the TSC frequency */
#define HV_X64_MSR_TSC_FREQUENCY 0x40000022
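
For context on how a guest consumes the two MSRs defined above (per the Hyper-V TLFS and the Linux Hyper-V clocksource, not part of this patch): HV_X64_MSR_TIME_REF_COUNT can be read directly for a 100 ns partition reference counter, while HV_X64_MSR_REFERENCE_TSC is written with the guest-physical address of a reference TSC page, from which time is derived as (tsc * tsc_scale) >> 64 + tsc_offset under a sequence-retry loop. The sketch below is illustrative only; rdmsr(), wrmsr() and rdtsc() are assumed helpers, and the structure and field names follow the TLFS rather than anything in this patch.

#include <stdint.h>

#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
#define HV_X64_MSR_REFERENCE_TSC  0x40000021

/* Reference TSC page layout as described in the TLFS. */
struct hv_reference_tsc_page {
    volatile uint32_t tsc_sequence;   /* 0: page invalid, fall back to MSR */
    uint32_t reserved1;
    volatile uint64_t tsc_scale;
    volatile int64_t  tsc_offset;
};

extern uint64_t rdmsr(uint32_t msr);             /* assumed helper */
extern void     wrmsr(uint32_t msr, uint64_t v); /* assumed helper */
extern uint64_t rdtsc(void);                     /* assumed helper */

/* Hand the hypervisor a page to fill in: GPA with bit 0 set as enable. */
static void hv_enable_tsc_page(uint64_t tsc_page_gpa)
{
    wrmsr(HV_X64_MSR_REFERENCE_TSC, tsc_page_gpa | 1);
}

/* Partition reference time in 100 ns units. */
static uint64_t hv_read_reference_time(const struct hv_reference_tsc_page *pg)
{
    uint32_t seq;
    uint64_t scale, tsc;
    int64_t offset;

    do {
        seq = pg->tsc_sequence;
        if (seq == 0) {                 /* page not (yet) valid */
            return rdmsr(HV_X64_MSR_TIME_REF_COUNT);
        }
        scale = pg->tsc_scale;
        offset = pg->tsc_offset;
        tsc = rdtsc();
    } while (pg->tsc_sequence != seq);  /* retry if updated concurrently */

    /* 64x64->128-bit multiply via the GCC/Clang __int128 extension. */
    return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}
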
#define KVM_CAP_ARM_EL1_32BIT 93
#define KVM_CAP_SPAPR_MULTITCE 94
#define KVM_CAP_EXT_EMUL_CPUID 95
+#define KVM_CAP_HYPERV_TIME 96
#ifdef KVM_CAP_IRQ_ROUTING
bool hyperv_vapic;
bool hyperv_relaxed_timing;
int hyperv_spinlock_attempts;
+ bool hyperv_time;
bool check_cpuid;
bool enforce_cpuid;
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
+ DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
DEFINE_PROP_END_OF_LIST()
uint64_t msr_hv_hypercall;
uint64_t msr_hv_guest_os_id;
uint64_t msr_hv_vapic;
+ uint64_t msr_hv_tsc;
/* exception/interrupt handling */
int error_code;
static int lm_capable_kernel;
static bool has_msr_hv_hypercall;
static bool has_msr_hv_vapic;
+static bool has_msr_hv_tsc;
static bool has_msr_architectural_pmu;
static uint32_t num_architectural_pmu_counters;
CPUState *cs = CPU(cpu);
return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
(hyperv_hypercall_available(cpu) ||
+ cpu->hyperv_time ||
cpu->hyperv_relaxed_timing);
}
c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
has_msr_hv_vapic = true;
}
-
+ if (cpu->hyperv_time &&
+ kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+ c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
+ /* bit 9 of CPUID 0x40000003 EAX: partition reference TSC page available */
+ c->eax |= 0x200;
+ has_msr_hv_tsc = true;
+ }
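
The bits set above land in HYPERV_CPUID_FEATURES (leaf 0x40000003) EAX: bit 1 (HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) advertises the partition reference counter and bit 9 (the literal 0x200) the reference TSC page. A minimal guest-side detection sketch, not part of this patch, assuming GCC's <cpuid.h> and illustrative macro names:

#include <stdbool.h>
#include <cpuid.h>

#define HV_CPUID_FEATURES           0x40000003
#define HV_TIME_REF_COUNT_AVAILABLE (1u << 1)   /* partition reference counter */
#define HV_REFERENCE_TSC_AVAILABLE  (1u << 9)   /* reference TSC page */

static bool hv_time_sources_present(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 0x40000003 reports Hyper-V feature availability in EAX. */
    __cpuid(HV_CPUID_FEATURES, eax, ebx, ecx, edx);
    return (eax & HV_TIME_REF_COUNT_AVAILABLE) &&
           (eax & HV_REFERENCE_TSC_AVAILABLE);
}
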
c = &cpuid_data.entries[cpuid_i++];
c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
if (cpu->hyperv_relaxed_timing) {
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
+ if (has_msr_hv_tsc) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
+ env->msr_hv_tsc);
+ }
/* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
* kvm_put_msr_feature_control. */
if (has_msr_hv_vapic) {
msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
}
+ if (has_msr_hv_tsc) {
+ msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
+ }
msr_data.info.nmsrs = n;
ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
case HV_X64_MSR_APIC_ASSIST_PAGE:
env->msr_hv_vapic = msrs[i].data;
break;
+ case HV_X64_MSR_REFERENCE_TSC:
+ env->msr_hv_tsc = msrs[i].data;
+ break;
}
}
}
};
+static bool hyperv_time_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+ .name = "cpu/msr_hyperv_time",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField []) {
+ VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
const VMStateDescription vmstate_x86_cpu = {
.name = "cpu",
.version_id = 12,
}, {
.vmsd = &vmstate_msr_hyperv_vapic,
.needed = hyperv_vapic_enable_needed,
+ }, {
+ .vmsd = &vmstate_msr_hyperv_time,
+ .needed = hyperv_time_enable_needed,
} , {
/* empty */
}