KVM: x86: Move .pmu_ops to kvm_x86_init_ops and tag as __initdata
authorLike Xu <likexu@tencent.com>
Tue, 29 Mar 2022 23:50:53 +0000 (23:50 +0000)
committerPaolo Bonzini <pbonzini@redhat.com>
Wed, 13 Apr 2022 17:37:45 +0000 (13:37 -0400)
Move .pmu_ops from kvm_x86_ops to kvm_x86_init_ops and tag the vendor
implementations as __initdata. This saves a few bytes of runtime memory
and, more importantly, makes the original ops unreachable after init,
i.e. makes it harder to sneak in post-init modification bugs.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Like Xu <likexu@tencent.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220329235054.3534728-4-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

index 25d1aac..e1c695f 100644 (file)
@@ -1465,8 +1465,6 @@ struct kvm_x86_ops {
        int cpu_dirty_log_size;
        void (*update_cpu_dirty_logging)(struct kvm_vcpu *vcpu);
 
-       /* pmu operations of sub-arch */
-       const struct kvm_pmu_ops *pmu_ops;
        const struct kvm_x86_nested_ops *nested_ops;
 
        void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
@@ -1542,6 +1540,7 @@ struct kvm_x86_init_ops {
        unsigned int (*handle_intel_pt_intr)(void);
 
        struct kvm_x86_ops *runtime_ops;
+       struct kvm_pmu_ops *pmu_ops;
 };
 
 struct kvm_arch_async_pf {
index 24eb935..57ab473 100644 (file)
@@ -319,7 +319,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
        }
 }
 
-struct kvm_pmu_ops amd_pmu_ops = {
+struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .pmc_perf_hw_id = amd_pmc_perf_hw_id,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
index 95b26db..22bbd69 100644 (file)
@@ -4694,7 +4694,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .sched_in = svm_sched_in,
 
-       .pmu_ops = &amd_pmu_ops,
        .nested_ops = &svm_nested_ops,
 
        .deliver_interrupt = svm_deliver_interrupt,
@@ -4988,6 +4987,7 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
        .check_processor_compatibility = svm_check_processor_compat,
 
        .runtime_ops = &svm_x86_ops,
+       .pmu_ops = &amd_pmu_ops,
 };
 
 static int __init svm_init(void)
index bc3f851..9db6623 100644 (file)
@@ -723,7 +723,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
                intel_pmu_release_guest_lbr_event(vcpu);
 }
 
-struct kvm_pmu_ops intel_pmu_ops = {
+struct kvm_pmu_ops intel_pmu_ops __initdata = {
        .pmc_perf_hw_id = intel_pmc_perf_hw_id,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
index fb0b0fa..df0b70c 100644 (file)
@@ -7816,7 +7816,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
        .cpu_dirty_log_size = PML_ENTITY_NUM,
        .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
-       .pmu_ops = &intel_pmu_ops,
        .nested_ops = &vmx_nested_ops,
 
        .pi_update_irte = vmx_pi_update_irte,
@@ -8070,6 +8069,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
        .handle_intel_pt_intr = NULL,
 
        .runtime_ops = &vmx_x86_ops,
+       .pmu_ops = &intel_pmu_ops,
 };
 
 static void vmx_cleanup_l1d_flush(void)
index 65be0de..fb8153d 100644 (file)
@@ -11633,7 +11633,7 @@ static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
 #include <asm/kvm-x86-ops.h>
 #undef __KVM_X86_OP
 
-       kvm_pmu_ops_update(ops->runtime_ops->pmu_ops);
+       kvm_pmu_ops_update(ops->pmu_ops);
 }
 
 int kvm_arch_hardware_setup(void *opaque)