x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
author	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
	Thu, 21 Jun 2018 02:01:22 +0000 (22:01 -0400)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
	Wed, 15 Aug 2018 16:14:48 +0000 (18:14 +0200)
commit 989e3992d2eca32c3f1404f2bc91acda3aa122d8 upstream

The IA32_FLUSH_CMD MSR needs to be written only on VMENTER. Extend
add_atomic_switch_msr() with an entry_only parameter so that such an MSR
can be stored in the guest (VM-entry) MSR array only.
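
For illustration, the intended use looks like the calls below. The
IA32_FLUSH_CMD call site is hypothetical here (this patch only adds the
mechanism); the entry_only semantics are from the diff that follows:

	/*
	 * Hypothetical caller: with entry_only == true the MSR occupies a
	 * slot only in the guest (VM-entry) autoload array; no VM-exit
	 * (host) slot is consumed and host_val is ignored.
	 */
	add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);

	/* Existing callers keep both directions, e.g.: */
	add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer, false);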

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e4ccb93..e868c06 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2038,9 +2038,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
-                                 u64 guest_val, u64 host_val)
+                                 u64 guest_val, u64 host_val, bool entry_only)
 {
-       int i, j;
+       int i, j = 0;
        struct msr_autoload *m = &vmx->msr_autoload;
 
        switch (msr) {
@@ -2076,7 +2076,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
        }
 
        i = find_msr(&m->guest, msr);
-       j = find_msr(&m->host, msr);
+       if (!entry_only)
+               j = find_msr(&m->host, msr);
+
        if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
                printk_once(KERN_WARNING "Not enough msr switch entries. "
                                "Can't add msr %x\n", msr);
@@ -2086,12 +2088,16 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                i = m->guest.nr++;
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
        }
+       m->guest.val[i].index = msr;
+       m->guest.val[i].value = guest_val;
+
+       if (entry_only)
+               return;
+
        if (j < 0) {
                j = m->host.nr++;
                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
        }
-       m->guest.val[i].index = msr;
-       m->guest.val[i].value = guest_val;
        m->host.val[j].index = msr;
        m->host.val[j].value = host_val;
 }
@@ -2150,7 +2156,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
                        guest_efer &= ~EFER_LME;
                if (guest_efer != host_efer)
                        add_atomic_switch_msr(vmx, MSR_EFER,
-                                             guest_efer, host_efer);
+                                             guest_efer, host_efer, false);
                return false;
        } else {
                guest_efer &= ~ignore_bits;
@@ -3314,7 +3320,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                vcpu->arch.ia32_xss = data;
                if (vcpu->arch.ia32_xss != host_xss)
                        add_atomic_switch_msr(vmx, MSR_IA32_XSS,
-                               vcpu->arch.ia32_xss, host_xss);
+                               vcpu->arch.ia32_xss, host_xss, false);
                else
                        clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
                break;
@@ -8985,7 +8991,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
                        clear_atomic_switch_msr(vmx, msrs[i].msr);
                else
                        add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
-                                       msrs[i].host);
+                                       msrs[i].host, false);
 }
 
 static void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
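
For reference, here is how add_atomic_switch_msr() reads with this patch
applied. This is a sketch stitched together from the hunks above plus the
unchanged lines the diff elides; the MSR-specific switch cases are omitted:

static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val, bool entry_only)
{
	int i, j = 0;	/* j is only consulted when entry_only is false */
	struct msr_autoload *m = &vmx->msr_autoload;

	/* ... MSR-specific special cases omitted, see the first hunk ... */

	i = find_msr(&m->guest, msr);
	if (!entry_only)
		j = find_msr(&m->host, msr);

	if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	}

	if (i < 0) {
		i = m->guest.nr++;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	/* A VMENTER-only MSR never touches the VM-exit (host) load list. */
	if (entry_only)
		return;

	if (j < 0) {
		j = m->host.nr++;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}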