KVM: x86/mmu: Avoid memslot lookup during KVM_PFN_ERR_HWPOISON handling
author      David Matlack <dmatlack@google.com>
Wed, 21 Sep 2022 17:35:41 +0000 (10:35 -0700)
committer   Paolo Bonzini <pbonzini@redhat.com>
Thu, 29 Dec 2022 20:33:20 +0000 (15:33 -0500)
Pass the kvm_page_fault struct down to kvm_handle_error_pfn() to avoid a
memslot lookup when handling KVM_PFN_ERR_HWPOISON. Opportunistically
move the gfn_to_hva_memslot() call and @current down into
kvm_send_hwpoison_signal() to cut down on line lengths.

No functional change intended.
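
For reference, the change leans on fields that struct kvm_page_fault already
carries by the time kvm_faultin_pfn() runs. The sketch below is abbreviated and
illustrative only, not the full definition from arch/x86/kvm/mmu/mmu_internal.h:

	struct kvm_page_fault {
		/* ... */
		gfn_t gfn;			/* gfn being faulted in */
		struct kvm_memory_slot *slot;	/* memslot backing @gfn, resolved
						 * earlier in the fault path */
		kvm_pfn_t pfn;			/* resolved host pfn, or a
						 * KVM_PFN_ERR_* value on failure */
		/* ... */
	};

Because fault->slot is already populated, kvm_handle_error_pfn() can pass it
straight to gfn_to_hva_memslot() rather than calling kvm_vcpu_gfn_to_hva(),
which would redo the gfn-to-memslot translation.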

Signed-off-by: David Matlack <dmatlack@google.com>
Reviewed-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20220921173546.2674386-6-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index d8a256f..b5f9f07 100644
@@ -3188,14 +3188,16 @@ static int __direct_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
        return ret;
 }
 
-static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
+static void kvm_send_hwpoison_signal(struct kvm_memory_slot *slot, gfn_t gfn)
 {
-       send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, PAGE_SHIFT, tsk);
+       unsigned long hva = gfn_to_hva_memslot(slot, gfn);
+
+       send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva, PAGE_SHIFT, current);
 }
 
-static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
+static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
-       if (is_sigpending_pfn(pfn)) {
+       if (is_sigpending_pfn(fault->pfn)) {
                kvm_handle_signal_exit(vcpu);
                return -EINTR;
        }
@@ -3205,11 +3207,11 @@ static int kvm_handle_error_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
         * into the spte otherwise read access on readonly gfn also can
         * caused mmio page fault and treat it as mmio access.
         */
-       if (pfn == KVM_PFN_ERR_RO_FAULT)
+       if (fault->pfn == KVM_PFN_ERR_RO_FAULT)
                return RET_PF_EMULATE;
 
-       if (pfn == KVM_PFN_ERR_HWPOISON) {
-               kvm_send_hwpoison_signal(kvm_vcpu_gfn_to_hva(vcpu, gfn), current);
+       if (fault->pfn == KVM_PFN_ERR_HWPOISON) {
+               kvm_send_hwpoison_signal(fault->slot, fault->gfn);
                return RET_PF_RETRY;
        }
 
@@ -4258,7 +4260,7 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                return ret;
 
        if (unlikely(is_error_pfn(fault->pfn)))
-               return kvm_handle_error_pfn(vcpu, fault->gfn, fault->pfn);
+               return kvm_handle_error_pfn(vcpu, fault);
 
        return RET_PF_CONTINUE;
 }
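
For context on why no memslot lookup is needed in the hunk above: for direct
MMUs, fault->gfn and fault->slot are filled in before kvm_faultin_pfn() is
reached. A rough sketch of that setup, paraphrased from kvm_mmu_do_page_fault()
in mmu_internal.h as of this series (exact code may differ):

	/* Illustrative only: gfn and slot are resolved once, up front. */
	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

With the slot cached in the fault struct, the KVM_PFN_ERR_HWPOISON path no
longer needs kvm_vcpu_gfn_to_hva() to walk the memslots a second time.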