KVM: x86/mmu: Handle no-slot faults in kvm_faultin_pfn()
author David Matlack <dmatlack@google.com>
Wed, 21 Sep 2022 17:35:42 +0000 (10:35 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Tue, 27 Dec 2022 11:02:59 +0000 (06:02 -0500)
Handle faults on GFNs that do not have a backing memslot in
kvm_faultin_pfn() and drop handle_abnormal_pfn(). This eliminates
duplicate code in the various page fault handlers.

Opportunistically tweak the comment about handling gfn > host.MAXPHYADDR
to reflect that the effect of returning RET_PF_EMULATE at that point is
to avoid creating an MMIO SPTE for such GFNs.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-7-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
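
For orientation before the diff: a simplified sketch (reconstructed from the hunks below, not verbatim kernel code) of how the fault path reads once no-slot handling moves into kvm_faultin_pfn(). Both direct_page_fault() and FNAME(page_fault) now make a single call, passing the access mask that handle_abnormal_pfn() used to take separately:

	/* Caller side: ACC_ALL for direct faults, walker.pte_access for shadow paging. */
	r = kvm_faultin_pfn(vcpu, fault, access);
	if (r != RET_PF_CONTINUE)
		return r;

	/* Inside kvm_faultin_pfn(), once the pfn has been resolved: */
	if (unlikely(is_error_pfn(fault->pfn)))
		return kvm_handle_error_pfn(vcpu, fault);

	/*
	 * No backing memslot: cache the MMIO info and let
	 * kvm_handle_noslot_fault() decide between RET_PF_EMULATE
	 * and RET_PF_CONTINUE.
	 */
	if (unlikely(!fault->slot))
		return kvm_handle_noslot_fault(vcpu, fault, access);

	return RET_PF_CONTINUE;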

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b5f9f07554540770273257081154893dcf1f4833..e2e8c4dfbaa5110ddf7d88c018bdef3919993d8c 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3218,28 +3218,32 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fa
        return -EFAULT;
 }
 
-static int handle_abnormal_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
-                              unsigned int access)
+static int kvm_handle_noslot_fault(struct kvm_vcpu *vcpu,
+                                  struct kvm_page_fault *fault,
+                                  unsigned int access)
 {
-       if (unlikely(!fault->slot)) {
-               gva_t gva = fault->is_tdp ? 0 : fault->addr;
+       gva_t gva = fault->is_tdp ? 0 : fault->addr;
 
-               vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
-                                    access & shadow_mmio_access_mask);
-               /*
-                * If MMIO caching is disabled, emulate immediately without
-                * touching the shadow page tables as attempting to install an
-                * MMIO SPTE will just be an expensive nop.  Do not cache MMIO
-                * whose gfn is greater than host.MAXPHYADDR, any guest that
-                * generates such gfns is running nested and is being tricked
-                * by L0 userspace (you can observe gfn > L1.MAXPHYADDR if
-                * and only if L1's MAXPHYADDR is inaccurate with respect to
-                * the hardware's).
-                */
-               if (unlikely(!enable_mmio_caching) ||
-                   unlikely(fault->gfn > kvm_mmu_max_gfn()))
-                       return RET_PF_EMULATE;
-       }
+       vcpu_cache_mmio_info(vcpu, gva, fault->gfn,
+                            access & shadow_mmio_access_mask);
+
+       /*
+        * If MMIO caching is disabled, emulate immediately without
+        * touching the shadow page tables as attempting to install an
+        * MMIO SPTE will just be an expensive nop.
+        */
+       if (unlikely(!enable_mmio_caching))
+               return RET_PF_EMULATE;
+
+       /*
+        * Do not create an MMIO SPTE for a gfn greater than host.MAXPHYADDR,
+        * any guest that generates such gfns is running nested and is being
+        * tricked by L0 userspace (you can observe gfn > L1.MAXPHYADDR if and
+        * only if L1's MAXPHYADDR is inaccurate with respect to the
+        * hardware's).
+        */
+       if (unlikely(fault->gfn > kvm_mmu_max_gfn()))
+               return RET_PF_EMULATE;
 
        return RET_PF_CONTINUE;
 }
@@ -4248,7 +4252,8 @@ static int __kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        return RET_PF_CONTINUE;
 }
 
-static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
+static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault,
+                          unsigned int access)
 {
        int ret;
 
@@ -4262,6 +4267,9 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
        if (unlikely(is_error_pfn(fault->pfn)))
                return kvm_handle_error_pfn(vcpu, fault);
 
+       if (unlikely(!fault->slot))
+               return kvm_handle_noslot_fault(vcpu, fault, access);
+
        return RET_PF_CONTINUE;
 }
 
@@ -4312,11 +4320,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        if (r)
                return r;
 
-       r = kvm_faultin_pfn(vcpu, fault);
-       if (r != RET_PF_CONTINUE)
-               return r;
-
-       r = handle_abnormal_pfn(vcpu, fault, ACC_ALL);
+       r = kvm_faultin_pfn(vcpu, fault, ACC_ALL);
        if (r != RET_PF_CONTINUE)
                return r;
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 88acf232494ba84a6dc2f7310883025068dd1d80..e5662dbd519c4ed72bcdcce574100797762bd44f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -837,11 +837,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
        else
                fault->max_level = walker.level;
 
-       r = kvm_faultin_pfn(vcpu, fault);
-       if (r != RET_PF_CONTINUE)
-               return r;
-
-       r = handle_abnormal_pfn(vcpu, fault, walker.pte_access);
+       r = kvm_faultin_pfn(vcpu, fault, walker.pte_access);
        if (r != RET_PF_CONTINUE)
                return r;