KVM: x86/mmu: Collapse 32-bit PAE and 64-bit statements for helpers
author Sean Christopherson <seanjc@google.com>
Tue, 22 Jun 2021 17:57:30 +0000 (10:57 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 24 Jun 2021 22:00:46 +0000 (18:00 -0400)
Skip paging32E_init_context() and paging64_init_context_common() and go
directly to paging64_init_context() (formerly the common version) now
that the relevant flows no longer need to distinguish between 64-bit
paging and 32-bit PAE paging for other reasons.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210622175739.3610207-46-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 8cf0c1a..08ac4e4 100644
@@ -4531,9 +4531,8 @@ static void reset_guest_paging_metadata(struct kvm_vcpu *vcpu,
        update_last_nonleaf_level(mmu);
 }
 
-static void paging64_init_context_common(struct kvm_mmu *context)
+static void paging64_init_context(struct kvm_mmu *context)
 {
-       WARN_ON_ONCE(!is_cr4_pae(context));
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->sync_page = paging64_sync_page;
@@ -4541,11 +4540,6 @@ static void paging64_init_context_common(struct kvm_mmu *context)
        context->direct_map = false;
 }
 
-static void paging64_init_context(struct kvm_mmu *context)
-{
-       paging64_init_context_common(context);
-}
-
 static void paging32_init_context(struct kvm_mmu *context)
 {
        context->page_fault = paging32_page_fault;
@@ -4555,11 +4549,6 @@ static void paging32_init_context(struct kvm_mmu *context)
        context->direct_map = false;
 }
 
-static void paging32E_init_context(struct kvm_mmu *context)
-{
-       paging64_init_context_common(context);
-}
-
 static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
                                                         struct kvm_mmu_role_regs *regs)
 {
@@ -4650,8 +4639,6 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 
        if (!is_paging(vcpu))
                context->gva_to_gpa = nonpaging_gva_to_gpa;
-       else if (is_long_mode(vcpu))
-               context->gva_to_gpa = paging64_gva_to_gpa;
        else if (is_pae(vcpu))
                context->gva_to_gpa = paging64_gva_to_gpa;
        else
@@ -4704,10 +4691,8 @@ static void shadow_mmu_init_context(struct kvm_vcpu *vcpu, struct kvm_mmu *conte
 
        if (!____is_cr0_pg(regs))
                nonpaging_init_context(context);
-       else if (____is_efer_lma(regs))
-               paging64_init_context(context);
        else if (____is_cr4_pae(regs))
-               paging32E_init_context(context);
+               paging64_init_context(context);
        else
                paging32_init_context(context);
        context->root_level = role_regs_to_root_level(regs);
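
For reference, a sketch of the two selection sites as they read after
this patch, reconstructed purely from the hunks above:

        /* init_kvm_tdp_mmu(): 64-bit and 32-bit PAE guests share the paging64 walker. */
        if (!is_paging(vcpu))
                context->gva_to_gpa = nonpaging_gva_to_gpa;
        else if (is_pae(vcpu))
                context->gva_to_gpa = paging64_gva_to_gpa;
        else
                context->gva_to_gpa = paging32_gva_to_gpa;

        /* shadow_mmu_init_context(): any PAE-based mode uses paging64_init_context(). */
        if (!____is_cr0_pg(regs))
                nonpaging_init_context(context);
        else if (____is_cr4_pae(regs))
                paging64_init_context(context);
        else
                paging32_init_context(context);

The collapse is safe because long mode requires CR4.PAE=1, so the
is_pae()/____is_cr4_pae() checks also cover 64-bit guests.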