KVM: x86/mmu: Rename kvm_mmu->get_cr3() to ->get_guest_pgd()
author Sean Christopherson <sean.j.christopherson@intel.com>
Tue, 3 Mar 2020 02:02:39 +0000 (18:02 -0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 16 Mar 2020 16:57:46 +0000 (17:57 +0100)
Rename kvm_mmu->get_cr3() to call out that it is retrieving a guest
value, as opposed to kvm_mmu->set_cr3(), which sets a host value, and to
note that it will return something other than CR3 when nested EPT is in
use.  Hopefully the new name will also make it more obvious that L1's
nested_cr3 is returned in SVM's nested NPT case.
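As a rough sketch of the three cases the new name covers (paraphrased from
context, not taken verbatim from this patch; the bodies may differ slightly
from the actual source):

  /* Legacy/shadow paging and non-nested TDP: the guest's CR3. */
  static unsigned long get_cr3(struct kvm_vcpu *vcpu)
  {
          return kvm_read_cr3(vcpu);
  }

  /* Nested NPT: L1's nested_cr3, not the vCPU's current CR3. */
  static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
  {
          return to_svm(vcpu)->nested.nested_cr3;
  }

  /* Nested EPT: L1's EPT pointer, which is not a CR3 at all. */
  static unsigned long nested_ept_get_eptp(struct kvm_vcpu *vcpu)
  {
          return get_vmcs12(vcpu)->ept_pointer;
  }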

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 327cfce..316ec6c 100644
@@ -385,7 +385,7 @@ struct kvm_mmu_root_info {
  */
 struct kvm_mmu {
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
-       unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
+       unsigned long (*get_guest_pgd)(struct kvm_vcpu *vcpu);
        u64 (*get_pdptr)(struct kvm_vcpu *vcpu, int index);
        int (*page_fault)(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u32 err,
                          bool prefault);
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index a214e10..a1f4e32 100644
@@ -3733,7 +3733,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
        gfn_t root_gfn, root_cr3;
        int i;
 
-       root_cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+       root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
        root_gfn = root_cr3 >> PAGE_SHIFT;
 
        if (mmu_check_root(vcpu, root_gfn))
@@ -4070,7 +4070,7 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
        arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
        arch.gfn = gfn;
        arch.direct_map = vcpu->arch.mmu->direct_map;
-       arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
+       arch.cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
 
        return kvm_setup_async_pf(vcpu, cr2_or_gpa,
                                  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
@@ -4929,7 +4929,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
        context->shadow_root_level = kvm_x86_ops->get_tdp_level(vcpu);
        context->direct_map = true;
        context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
-       context->get_cr3 = get_cr3;
+       context->get_guest_pgd = get_cr3;
        context->get_pdptr = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
 
@@ -5076,7 +5076,7 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
 
        kvm_init_shadow_mmu(vcpu);
        context->set_cr3           = kvm_x86_ops->set_cr3;
-       context->get_cr3           = get_cr3;
+       context->get_guest_pgd     = get_cr3;
        context->get_pdptr         = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
 }
@@ -5090,7 +5090,7 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
                return;
 
        g_context->mmu_role.as_u64 = new_role.as_u64;
-       g_context->get_cr3           = get_cr3;
+       g_context->get_guest_pgd     = get_cr3;
        g_context->get_pdptr         = kvm_pdptr_read;
        g_context->inject_page_fault = kvm_inject_page_fault;
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 6b15b58..1ddbfff 100644
@@ -333,7 +333,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
        trace_kvm_mmu_pagetable_walk(addr, access);
 retry_walk:
        walker->level = mmu->root_level;
-       pte           = mmu->get_cr3(vcpu);
+       pte           = mmu->get_guest_pgd(vcpu);
        have_ad       = PT_HAVE_ACCESSED_DIRTY(mmu);
 
 #if PTTYPE == 64
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0d41727..48c9390 100644
@@ -3012,7 +3012,7 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
        vcpu->arch.mmu = &vcpu->arch.guest_mmu;
        kvm_init_shadow_mmu(vcpu);
        vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
-       vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
+       vcpu->arch.mmu->get_guest_pgd     = nested_svm_get_tdp_cr3;
        vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
        vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
        vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index c1eae44..b6719d7 100644
@@ -355,7 +355,7 @@ static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
                        nested_ept_ad_enabled(vcpu),
                        nested_ept_get_eptp(vcpu));
        vcpu->arch.mmu->set_cr3           = vmx_set_cr3;
-       vcpu->arch.mmu->get_cr3           = nested_ept_get_eptp;
+       vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
        vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
        vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b15092c..ba4d476 100644
@@ -10165,7 +10165,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
                return;
 
        if (!vcpu->arch.mmu->direct_map &&
-             work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
+             work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
                return;
 
        kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);