KVM: nVMX: Fix nested VMX TSC emulation
author Nadav Har'El <nyh@il.ibm.com>
Tue, 2 Aug 2011 12:54:52 +0000 (15:54 +0300)
committer Avi Kivity <avi@redhat.com>
Sun, 25 Sep 2011 16:18:02 +0000 (19:18 +0300)
This patch fixes two corner cases in nested (L2) handling of TSC-related
issues:

1. Somewhat surprisingly, according to the Intel spec, if L1 allows WRMSR to
the TSC MSR without an exit, then this should set L1's TSC value itself - not
offset by vmcs12.TSC_OFFSET (as was wrongly done in the previous code).

2. Allow L1 to disable the TSC_OFFSETING control, and then correctly ignore
vmcs12.TSC_OFFSET. The resulting TSC derivation is sketched below.
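
For clarity, a minimal sketch of the intended arithmetic after this patch.
This is not kernel code; the helper name and parameters are hypothetical:

    /*
     * L1 observes host_tsc + vmcs01.TSC_OFFSET. While L2 runs, its TSC
     * additionally includes vmcs12.TSC_OFFSET, but only when L1 enabled
     * CPU_BASED_USE_TSC_OFFSETING in vmcs12. (Hypothetical helper.)
     */
    static u64 nested_guest_tsc(u64 host_tsc, u64 vmcs01_offset,
                                u64 vmcs12_offset, bool tsc_offsetting)
    {
            u64 l1_tsc = host_tsc + vmcs01_offset;  /* what L1 sees */

            return tsc_offsetting ? l1_tsc + vmcs12_offset : l1_tsc;
    }

So when L1 lets a WRMSR to the TSC MSR through without an exit, only L1's
offset (vmcs01_tsc_offset) changes; vmcs02.TSC_OFFSET is then recomputed as
the sum of both offsets, or just the new L1 offset if offsetting is disabled.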

Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 97b6454..5e8d411 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1777,15 +1777,23 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
  */
 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-       vmcs_write64(TSC_OFFSET, offset);
-       if (is_guest_mode(vcpu))
+       if (is_guest_mode(vcpu)) {
                /*
-                * We're here if L1 chose not to trap the TSC MSR. Since
-                * prepare_vmcs12() does not copy tsc_offset, we need to also
-                * set the vmcs12 field here.
+                * We're here if L1 chose not to trap WRMSR to TSC. According
+                * to the spec, this should set L1's TSC; The offset that L1
+                * set for L2 remains unchanged, and still needs to be added
+                * to the newly set TSC to get L2's TSC.
                 */
-               get_vmcs12(vcpu)->tsc_offset = offset -
-                       to_vmx(vcpu)->nested.vmcs01_tsc_offset;
+               struct vmcs12 *vmcs12;
+               to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
+               /* recalculate vmcs02.TSC_OFFSET: */
+               vmcs12 = get_vmcs12(vcpu);
+               vmcs_write64(TSC_OFFSET, offset +
+                       (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
+                        vmcs12->tsc_offset : 0));
+       } else {
+               vmcs_write64(TSC_OFFSET, offset);
+       }
 }
 
 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
@@ -6485,8 +6493,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
        set_cr4_guest_host_mask(vmx);
 
-       vmcs_write64(TSC_OFFSET,
-               vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+       if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
+               vmcs_write64(TSC_OFFSET,
+                       vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+       else
+               vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
 
        if (enable_vpid) {
                /*
@@ -6893,7 +6904,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 
        load_vmcs12_host_state(vcpu, vmcs12);
 
-       /* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */
+       /* Update TSC_OFFSET if TSC was changed while L2 ran */
        vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
 
        /* This is needed for same reason as it was needed in prepare_vmcs02 */