KVM: SVM: Drop vcpu_svm.vmcb_pa
author     Sean Christopherson <seanjc@google.com>
           Tue, 6 Apr 2021 17:18:09 +0000 (10:18 -0700)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Tue, 20 Apr 2021 08:18:49 +0000 (04:18 -0400)
Remove vmcb_pa from vcpu_svm and simply read current_vmcb->pa directly in
the one path where it is consumed.  Unlike svm->vmcb, use of the current
vmcb's address is very limited, as evidenced by the fact that its use
can be trimmed to a single dereference.

Opportunistically add a comment about using vmcb01 for VMLOAD/VMSAVE, as
at first glance using vmcb01 instead of vmcb_pa looks wrong.

No functional change intended.

Cc: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210406171811.4043363-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
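
For context, a rough sketch of the bookkeeping involved (field layout per
kvm_vmcb_info in svm.h as of this series; treat the exact members as
illustrative): current_vmcb points at a kvm_vmcb_info that already records
both the virtual and the physical address of the active VMCB, so a
separately cached vmcb_pa in vcpu_svm can only ever mirror current_vmcb->pa.

struct kvm_vmcb_info {
	struct page *pg;	/* backing page of the VMCB */
	void *ptr;		/* VA of the VMCB, mirrored by svm->vmcb */
	unsigned long pa;	/* PA of the VMCB, handed to VMRUN */
	u64 asid_generation;
	u32 cpl;
};

After this patch, svm_switch_vmcb() only has to update the two fields that
remain (current_vmcb and vmcb), and the lone consumer of the physical
address reads current_vmcb->pa at the top of svm_vcpu_enter_exit().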
arch/x86/kvm/svm/svm.c
arch/x86/kvm/svm/svm.h

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 04ef49d..1207526 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1310,7 +1310,6 @@ void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
 {
        svm->current_vmcb = target_vmcb;
        svm->vmcb = target_vmcb->ptr;
-       svm->vmcb_pa = target_vmcb->pa;
 }
 
 static int svm_create_vcpu(struct kvm_vcpu *vcpu)
@@ -3704,6 +3703,7 @@ static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
+       unsigned long vmcb_pa = svm->current_vmcb->pa;
 
        /*
         * VMENTER enables interrupts (host state), but the kernel state is
@@ -3726,12 +3726,18 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
        lockdep_hardirqs_on(CALLER_ADDR0);
 
        if (sev_es_guest(vcpu->kvm)) {
-               __svm_sev_es_vcpu_run(svm->vmcb_pa);
+               __svm_sev_es_vcpu_run(vmcb_pa);
        } else {
                struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
 
+               /*
+                * Use a single vmcb (vmcb01 because it's always valid) for
+                * context switching guest state via VMLOAD/VMSAVE, that way
+                * the state doesn't need to be copied between vmcb01 and
+                * vmcb02 when switching vmcbs for nested virtualization.
+                */
                vmload(svm->vmcb01.pa);
-               __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&vcpu->arch.regs);
+               __svm_vcpu_run(vmcb_pa, (unsigned long *)&vcpu->arch.regs);
                vmsave(svm->vmcb01.pa);
 
                vmload(__sme_page_pa(sd->save_area));
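
The vmcb01-for-VMLOAD/VMSAVE comment is easier to follow with the nested
vmcb switching in view. Roughly (paraphrasing the nested.c call sites
rather than quoting them), the active vmcb flips between vmcb01 and vmcb02
around nested VMRUN and #VMEXIT, while vmcb01 stays allocated and valid for
the vCPU's entire lifetime:

/* Rough sketch of the vmcb switching, paraphrased from nested.c. */
static void enter_nested_guest_sketch(struct vcpu_svm *svm)
{
	/* Run L2 with its own control area... */
	svm_switch_vmcb(svm, &svm->nested.vmcb02);
}

static void leave_nested_guest_sketch(struct vcpu_svm *svm)
{
	/* ...and return to L1's vmcb on nested #VMEXIT. */
	svm_switch_vmcb(svm, &svm->vmcb01);
}

Because vmcb01 always exists, the chunk of guest state that is context
switched via VMLOAD/VMSAVE can live in vmcb01 permanently and never needs
to be copied into vmcb02; only VMRUN itself uses whichever vmcb is current,
i.e. the address snapshotted into the local vmcb_pa above.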
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index fffdd5f..04e21ff 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -111,7 +111,6 @@ struct svm_nested_state {
 struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
-       unsigned long vmcb_pa;
        struct kvm_vmcb_info vmcb01;
        struct kvm_vmcb_info *current_vmcb;
        struct svm_cpu_data *svm_data;