KVM: nVMX: Use kvm_vcpu_map() to get/pin vmcs12's APIC-access page
author     Sean Christopherson <seanjc@google.com>
           Fri, 29 Apr 2022 01:04:11 +0000 (01:04 +0000)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Mon, 20 Jun 2022 10:21:33 +0000 (06:21 -0400)
Use kvm_vcpu_map() to get/pin the backing for vmcs12's APIC-access page;
there's no reason it has to be restricted to 'struct page' backing.  The
APIC-access page actually doesn't need to be backed by anything, which is
ironically why it got left behind by the series which introduced
kvm_vcpu_map()[1]; the plan was to shove a dummy pfn into vmcs02[2], but
that code never got merged.

Switching the APIC-access page to kvm_vcpu_map() doesn't preclude using a
magic pfn in the future, and will allow a future patch to drop
kvm_vcpu_gpa_to_page().

[1] https://lore.kernel.org/all/1547026933-31226-1-git-send-email-karahmed@amazon.de
[2] https://lore.kernel.org/lkml/1543845551-4403-1-git-send-email-karahmed@amazon.de
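
For context, the conversion boils down to the same map/unmap pattern already
used for the virtual-APIC and posted-interrupt descriptor pages.  A condensed
sketch of the new flow (kernel-internal APIs exactly as used in the hunks
below; not a standalone program, and the error/exit handling done by the real
code is elided):

        /* Nested VM-Enter: pin whatever backs L1's APIC-access address. */
        struct kvm_host_map *map = &vmx->nested.apic_access_page_map;

        if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map))
                vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn));

        /*
         * Nested VM-Exit / free_nested(): unpin.  Pass dirty=false; the
         * backing page's contents are irrelevant even if it was written.
         */
        kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);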

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220429010416.2788472-6-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.h

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index aad938e..778f820 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -311,11 +311,12 @@ static void free_nested(struct kvm_vcpu *vcpu)
        vmx->nested.cached_vmcs12 = NULL;
        kfree(vmx->nested.cached_shadow_vmcs12);
        vmx->nested.cached_shadow_vmcs12 = NULL;
-       /* Unpin physical memory we referred to in the vmcs02 */
-       if (vmx->nested.apic_access_page) {
-               kvm_release_page_clean(vmx->nested.apic_access_page);
-               vmx->nested.apic_access_page = NULL;
-       }
+       /*
+        * Unpin physical memory we referred to in the vmcs02.  The APIC access
+        * page's backing page (yeah, confusing) shouldn't actually be accessed,
+        * and if it is written, the contents are irrelevant.
+        */
+       kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
        kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
        kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
        vmx->nested.pi_desc = NULL;
@@ -3164,8 +3165,6 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_host_map *map;
-       struct page *page;
-       u64 hpa;
 
        if (!vcpu->arch.pdptrs_from_userspace &&
            !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
@@ -3180,23 +3179,12 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 
 
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
-               /*
-                * Translate L1 physical address to host physical
-                * address for vmcs02. Keep the page pinned, so this
-                * physical address remains valid. We keep a reference
-                * to it so we can release it later.
-                */
-               if (vmx->nested.apic_access_page) { /* shouldn't happen */
-                       kvm_release_page_clean(vmx->nested.apic_access_page);
-                       vmx->nested.apic_access_page = NULL;
-               }
-               page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
-               if (!is_error_page(page)) {
-                       vmx->nested.apic_access_page = page;
-                       hpa = page_to_phys(vmx->nested.apic_access_page);
-                       vmcs_write64(APIC_ACCESS_ADDR, hpa);
+               map = &vmx->nested.apic_access_page_map;
+
+               if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) {
+                       vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn));
                } else {
-                       pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+                       pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n",
                                             __func__);
                        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        vcpu->run->internal.suberror =
@@ -4632,10 +4620,7 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
        }
 
        /* Unpin physical memory we referred to in vmcs02 */
-       if (vmx->nested.apic_access_page) {
-               kvm_release_page_clean(vmx->nested.apic_access_page);
-               vmx->nested.apic_access_page = NULL;
-       }
+       kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
        kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
        kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
        vmx->nested.pi_desc = NULL;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index a84c91e..286c88e 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -204,7 +204,7 @@ struct nested_vmx {
         * Guest pages referred to in the vmcs02 with host-physical
         * pointers, so we must keep them pinned while L2 runs.
         */
-       struct page *apic_access_page;
+       struct kvm_host_map apic_access_page_map;
        struct kvm_host_map virtual_apic_map;
        struct kvm_host_map pi_desc_map;
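
For reference, kvm_vcpu_map() is what lifts the 'struct page' restriction: it
kmap()s pfns that are backed by a 'struct page' and falls back to memremap()
for pfns that aren't, with kvm_vcpu_unmap() undoing whichever was used.  The
state it needs lives in struct kvm_host_map (include/linux/kvm_host.h), which
in this era looks roughly like:

        struct kvm_host_map {
                struct page *page;      /* valid only for 'struct page'-backed pfns */
                void *hva;              /* host virtual address of the mapping */
                kvm_pfn_t pfn;          /* what pfn_to_hpa(map->pfn) above consumes */
                kvm_pfn_t gfn;
        };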