Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 9 Jun 2021 20:09:57 +0000 (13:09 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 9 Jun 2021 20:09:57 +0000 (13:09 -0700)
Pull kvm fixes from Paolo Bonzini:
 "Bugfixes, including a TLB flush fix that affects processors without
  nested page tables"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: fix previous commit for 32-bit builds
  kvm: avoid speculation-based attacks from out-of-range memslot accesses
  KVM: x86: Unload MMU on guest TLB flush if TDP disabled to force MMU sync
  KVM: x86: Ensure liveliness of nested VM-Enter fail tracepoint message
  selftests: kvm: Add support for customized slot0 memory size
  KVM: selftests: introduce P47V64 for s390x
  KVM: x86: Ensure PV TLB flush tracepoint reflects KVM behavior
  KVM: X86: MMU: Use the correct inherited permissions to get shadow page
  KVM: LAPIC: Write 0 to TMICT should also cancel vmx-preemption timer
  KVM: SVM: Fix SEV SEND_START session length & SEND_UPDATE_DATA query length after commit 238eca821cee
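
For background on the first two entries: an out-of-range memslot index is
a classic Spectre-v1 surface, and the usual kernel hardening is to clamp
the index with array_index_nospec() after the architectural bounds check
(the follow-up entry reworks the same change so it also builds on 32-bit
targets). A minimal sketch of that pattern follows; it is kernel-context
code with a hypothetical helper name, not the literal shape of the fix:

#include <linux/kvm_host.h>
#include <linux/nospec.h>

/* Hypothetical helper illustrating the hardening pattern. */
static struct kvm_memory_slot *slot_lookup(struct kvm_memslots *slots, int i)
{
        if (i < 0 || i >= slots->used_slots)    /* architectural bounds check */
                return NULL;

        /*
         * Clamp 'i' so a mispredicted bounds check above cannot index
         * past the end of the array during speculative execution.
         */
        i = array_index_nospec(i, slots->used_slots);
        return &slots->memslots[i];
}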

arch/x86/kvm/x86.c

diff --combined arch/x86/kvm/x86.c
@@@ -3072,6 -3072,19 +3072,19 @@@ static void kvm_vcpu_flush_tlb_all(stru
  static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
  {
        ++vcpu->stat.tlb_flush;
+       if (!tdp_enabled) {
+               /*
+                * A TLB flush on behalf of the guest is equivalent to
+                * INVPCID(all), toggling CR4.PGE, etc., which requires
+                * a forced sync of the shadow page tables.  Unload the
+                * entire MMU here and the subsequent load will sync the
+                * shadow page tables, and also flush the TLB.
+                */
+               kvm_mmu_unload(vcpu);
+               return;
+       }
        static_call(kvm_x86_tlb_flush_guest)(vcpu);
  }
  
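The hunk above is the fix called out in the pull message. With TDP
(EPT/NPT) enabled, hardware walks the guest's own page tables, so a
guest-requested TLB flush needs nothing beyond the hardware-assisted
flush. Without TDP, KVM's shadow page tables cache the guest's
translations, and a guest-side flush (INVPCID(all), toggling CR4.PGE,
the PV flush hint) is also how the guest forces its page-table changes
to take effect, so KVM must sync the shadow pages as well. Unloading
the MMU guarantees that: the next guest entry passes through the reload
path, which rebuilds the roots, syncs the shadow page tables, and
flushes the TLB. Roughly, lightly paraphrased from this era's tree:

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
                return 0;       /* root still valid: nothing to rebuild */

        return kvm_mmu_load(vcpu);      /* rebuild roots, sync, flush TLB */
}
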
@@@ -3101,9 -3114,11 +3114,11 @@@ static void record_steal_time(struct kv
         * expensive IPIs.
         */
        if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
+               u8 st_preempted = xchg(&st->preempted, 0);
                trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
-                                      st->preempted & KVM_VCPU_FLUSH_TLB);
-               if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+                                      st_preempted & KVM_VCPU_FLUSH_TLB);
+               if (st_preempted & KVM_VCPU_FLUSH_TLB)
                        kvm_vcpu_flush_tlb_guest(vcpu);
        } else {
                st->preempted = 0;
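
The hunk above fixes a read-twice race: st->preempted lives in memory
shared with the guest, and the old code read it once for the tracepoint
and again (via xchg()) for the flush decision, so a guest write landing
between the two reads could make the trace disagree with what KVM
actually did. Snapshotting the field exactly once and using that
snapshot everywhere closes the window. A toy user-space illustration of
the same read-once pattern (names hypothetical, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

/* 'shared' stands in for a field the other side may rewrite at any time. */
static _Atomic unsigned char shared;

static void handle(void)
{
        /* One atomic read-and-clear; all later uses see this snapshot. */
        unsigned char snap = atomic_exchange(&shared, 0);

        printf("traced flush=%u\n", snap & 1u); /* what we report ... */
        if (snap & 1u)
                puts("flushing");               /* ... matches what we do */
}

int main(void)
{
        atomic_store(&shared, 1);
        handle();
        return 0;
}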
@@@ -3470,7 -3485,7 +3485,7 @@@ int kvm_get_msr_common(struct kvm_vcpu 
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
 -      case MSR_K8_SYSCFG:
 +      case MSR_AMD64_SYSCFG:
        case MSR_K8_TSEG_ADDR:
        case MSR_K8_TSEG_MASK:
        case MSR_VM_HSAVE_PA: