KVM: vmx/pmu: Reduce the overhead of LBR pass-through or cancellation
author Like Xu <like.xu@linux.intel.com>
Mon, 1 Feb 2021 05:10:35 +0000 (13:10 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 4 Feb 2021 10:27:25 +0000 (05:27 -0500)
Once the LBR records MSRs have already been passed through, there is no
need to call vmx_update_intercept_for_lbr_msrs() again and again; the
same holds when the pass-through has already been cancelled.
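
In effect, the patch makes the two toggle helpers idempotent: a new
msr_passthrough flag caches whether the LBR MSRs are currently passed
through, and each helper returns early when the requested state already
holds. A minimal sketch of the pattern, folded into one hypothetical
combined helper for illustration (the actual patch keeps the separate
enable/disable functions shown in the diff below):

    /* Illustrative only; not part of the patch. */
    static inline void vmx_set_lbr_msrs_passthrough(struct kvm_vcpu *vcpu,
                                                    bool want)
    {
            struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);

            /* Requested state already holds; skip the MSR bitmap walk. */
            if (lbr_desc->msr_passthrough == want)
                    return;

            /* 'set' means "intercept", the opposite of passing through. */
            vmx_update_intercept_for_lbr_msrs(vcpu, !want);
            lbr_desc->msr_passthrough = want;
    }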

Signed-off-by: Like Xu <like.xu@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Message-Id: <20210201051039.255478-8-like.xu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.h

index c7dbaac..254d9fc 100644 (file)
@@ -557,6 +557,7 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
        vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
        lbr_desc->records.nr = 0;
        lbr_desc->event = NULL;
+       lbr_desc->msr_passthrough = false;
 }
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
@@ -603,12 +604,24 @@ static void vmx_update_intercept_for_lbr_msrs(struct kvm_vcpu *vcpu, bool set)
 
 static inline void vmx_disable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
+       struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+       if (!lbr_desc->msr_passthrough)
+               return;
+
        vmx_update_intercept_for_lbr_msrs(vcpu, true);
+       lbr_desc->msr_passthrough = false;
 }
 
 static inline void vmx_enable_lbr_msrs_passthrough(struct kvm_vcpu *vcpu)
 {
+       struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
+
+       if (lbr_desc->msr_passthrough)
+               return;
+
        vmx_update_intercept_for_lbr_msrs(vcpu, false);
+       lbr_desc->msr_passthrough = true;
 }
 
 /*
index 41bf9ad..b5679d1 100644 (file)
@@ -113,6 +113,9 @@ struct lbr_desc {
         * The records may be inaccurate if the host reclaims the LBR.
         */
        struct perf_event *event;
+
+       /* True if LBRs are marked as not intercepted in the MSR bitmap */
+       bool msr_passthrough;
 };
 
 /*