kvm: add support for hyper-v timers
author Vadim Rozenfeld <vrozenfe@redhat.com>
Thu, 23 Jan 2014 13:40:49 +0000 (00:40 +1100)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 3 Feb 2014 16:33:55 +0000 (17:33 +0100)
http://msdn.microsoft.com/en-us/library/windows/hardware/ff541625%28v=vs.85%29.aspx

This code is generic: it covers activating the reference time counter (HV_X64_MSR_TIME_REF_COUNT) as well as the virtual reference time stamp counter (HV_X64_MSR_REFERENCE_TSC).
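
The enlightenment is off by default and must be requested per vCPU with
the new "hv-time" property; the corresponding CPUID bits are only
exposed to the guest when the host kernel advertises KVM_CAP_HYPERV_TIME.
As a usage sketch (CPU model and the rest of the command line are
illustrative only):

    qemu-system-x86_64 -enable-kvm -cpu host,hv-time ...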

Signed-off-by: Vadim Rozenfeld <vrozenfe@redhat.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
linux-headers/asm-x86/hyperv.h
linux-headers/linux/kvm.h
target-i386/cpu-qom.h
target-i386/cpu.c
target-i386/cpu.h
target-i386/kvm.c
target-i386/machine.c

diff --git a/linux-headers/asm-x86/hyperv.h b/linux-headers/asm-x86/hyperv.h
index b8f1c0176cbc2eb43ea4d43740d6186c018f7f9e..3b400ee9f7349d41e47a6b403fdc17689d444d35 100644
 /* MSR used to read the per-partition time reference counter */
 #define HV_X64_MSR_TIME_REF_COUNT              0x40000020
 
+/* A partition's reference time stamp counter (TSC) page */
+#define HV_X64_MSR_REFERENCE_TSC               0x40000021
+
 /* MSR used to retrieve the TSC frequency */
 #define HV_X64_MSR_TSC_FREQUENCY               0x40000022
 
diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h
index 5a496718458c4306adbc971547c14c0b37b1cf0d..999fb135e1e294bee32f26136d99a94c6269d222 100644
@@ -674,6 +674,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_ARM_EL1_32BIT 93
 #define KVM_CAP_SPAPR_MULTITCE 94
 #define KVM_CAP_EXT_EMUL_CPUID 95
+#define KVM_CAP_HYPERV_TIME 96
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index d1751a40c6232ecbafd5a18bfe39721895d6912e..722f11a04f5c152617e11b67fa62752e35b64059 100644
@@ -69,6 +69,7 @@ typedef struct X86CPU {
     bool hyperv_vapic;
     bool hyperv_relaxed_timing;
     int hyperv_spinlock_attempts;
+    bool hyperv_time;
     bool check_cpuid;
     bool enforce_cpuid;
 
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 2e0be01421d969d7d7e94b1c5a526c0506912649..1f30efdb63de9810ab8aef03079b2b937c7940be 100644
@@ -2702,6 +2702,7 @@ static Property x86_cpu_properties[] = {
     { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
     DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
     DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
+    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
     DEFINE_PROP_END_OF_LIST()
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 45bd554f31a81d7f4020d7dae0ed21090a4e5655..1b94f0ffb71c2b89cfcb0f730faab10e6af773a2 100644
@@ -865,6 +865,7 @@ typedef struct CPUX86State {
     uint64_t msr_hv_hypercall;
     uint64_t msr_hv_guest_os_id;
     uint64_t msr_hv_vapic;
+    uint64_t msr_hv_tsc;
 
     /* exception/interrupt handling */
     int error_code;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index ddd437f43cf961c28914ee451701f94e1e91e4d8..e555040a97121e1b54765047c02244dd35cbed63 100644
@@ -74,6 +74,7 @@ static bool has_msr_kvm_steal_time;
 static int lm_capable_kernel;
 static bool has_msr_hv_hypercall;
 static bool has_msr_hv_vapic;
+static bool has_msr_hv_tsc;
 
 static bool has_msr_architectural_pmu;
 static uint32_t num_architectural_pmu_counters;
@@ -442,6 +443,7 @@ static bool hyperv_enabled(X86CPU *cpu)
     CPUState *cs = CPU(cpu);
     return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
            (hyperv_hypercall_available(cpu) ||
+            cpu->hyperv_time ||
             cpu->hyperv_relaxed_timing);
 }
 
@@ -499,7 +501,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
             c->eax |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
             has_msr_hv_vapic = true;
         }
-
+        if (cpu->hyperv_time &&
+            kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+            c->eax |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+            c->eax |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
+            c->eax |= 0x200; /* partition reference TSC MSR available (CPUID 0x40000003 EAX bit 9) */
+            has_msr_hv_tsc = true;
+        }
         c = &cpuid_data.entries[cpuid_i++];
         c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
         if (cpu->hyperv_relaxed_timing) {
@@ -1239,6 +1247,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_APIC_ASSIST_PAGE,
                               env->msr_hv_vapic);
         }
+        if (has_msr_hv_tsc) {
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_REFERENCE_TSC,
+                              env->msr_hv_tsc);
+        }
 
         /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
          *       kvm_put_msr_feature_control. */
@@ -1530,6 +1542,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_vapic) {
         msrs[n++].index = HV_X64_MSR_APIC_ASSIST_PAGE;
     }
+    if (has_msr_hv_tsc) {
+        msrs[n++].index = HV_X64_MSR_REFERENCE_TSC;
+    }
 
     msr_data.info.nmsrs = n;
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
@@ -1647,6 +1662,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_APIC_ASSIST_PAGE:
             env->msr_hv_vapic = msrs[i].data;
             break;
+        case HV_X64_MSR_REFERENCE_TSC:
+            env->msr_hv_tsc = msrs[i].data;
+            break;
         }
     }
 
diff --git a/target-i386/machine.c b/target-i386/machine.c
index e72e27092f07bbf470742615ae7046fedae6a20b..d548c055a911298e371eba4e2a452db24f00a57b 100644
@@ -593,6 +593,25 @@ static const VMStateDescription vmstate_msr_hyperv_vapic = {
     }
 };
 
+static bool hyperv_time_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_hv_tsc != 0;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_time = {
+    .name = "cpu/msr_hyperv_time",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_UINT64(env.msr_hv_tsc, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 const VMStateDescription vmstate_x86_cpu = {
     .name = "cpu",
     .version_id = 12,
@@ -733,6 +752,9 @@ const VMStateDescription vmstate_x86_cpu = {
         }, {
             .vmsd = &vmstate_msr_hyperv_vapic,
             .needed = hyperv_vapic_enable_needed,
+        }, {
+            .vmsd = &vmstate_msr_hyperv_time,
+            .needed = hyperv_time_enable_needed,
         } , {
             /* empty */
         }