KVM: x86: Protect MSR-based index computations in pmu.h from Spectre-v1/L1TF attacks
author Marios Pomonis <pomonis@google.com>
Wed, 11 Dec 2019 20:47:48 +0000 (12:47 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 14 Feb 2020 21:31:04 +0000 (16:31 -0500)
commit 13c5183a4e643cc2b03a22d0e582c8e17bb7457d upstream.

This fixes a Spectre-v1/L1TF vulnerability in the get_gp_pmc() and
get_fixed_pmc() functions.
Both compute an array index from the (attacker-controlled) MSR number,
so a mispredicted bounds check could let the CPU speculatively index
past the end of the counter arrays.
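
As an aside (not part of the upstream patch), the user-space sketch below
illustrates the masking idea behind array_index_nospec(). It loosely follows
the generic fallback in include/linux/nospec.h, minus the kernel-only
compiler annotations; the clamp_index_nospec() helper and the demo values
are made up for illustration only.

  #include <stdio.h>

  /*
   * Branchless clamp, loosely modelled on the generic fallback of
   * array_index_nospec(): mask is ~0UL when index < size and 0UL
   * otherwise, so even a speculatively executed lookup can only reach
   * element 0 if the bounds check was mispredicted.  Like the kernel's
   * generic helper, this relies on an arithmetic right shift of a
   * negative long.
   */
  static unsigned long clamp_index_nospec(unsigned long index,
                                          unsigned long size)
  {
          unsigned long mask =
                  ~(long)(index | (size - 1UL - index)) >> (8 * sizeof(long) - 1);
          return index & mask;
  }

  int main(void)
  {
          unsigned long counters[4] = { 10, 20, 30, 40 };
          unsigned long msr  = 7;             /* attacker-chosen MSR number */
          unsigned long base = 3;             /* first counter MSR          */
          unsigned long idx  = msr - base;    /* 4: one past the end        */

          if (idx < 4)                        /* architectural bounds check */
                  printf("%lu\n", counters[clamp_index_nospec(idx, 4)]);
          else
                  printf("MSR out of range\n");       /* taken here         */
          return 0;
  }

Architecturally the out-of-range MSR is rejected; the point is that even if
the branch were mispredicted, the clamped index is 0 rather than 4, so no
out-of-bounds data can be pulled into the cache.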

Fixes: 25462f7f5295 ("KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch")

Signed-off-by: Nick Finco <nifi@google.com>
Signed-off-by: Marios Pomonis <pomonis@google.com>
Reviewed-by: Andrew Honig <ahonig@google.com>
Cc: stable@vger.kernel.org
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/pmu.h

diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index f96e1f9..fbf3d25 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -1,6 +1,8 @@
 #ifndef __KVM_X86_PMU_H
 #define __KVM_X86_PMU_H
 
+#include <linux/nospec.h>
+
 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
 #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
 #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
@@ -80,8 +82,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
 {
-       if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
-               return &pmu->gp_counters[msr - base];
+       if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
+               u32 index = array_index_nospec(msr - base,
+                                              pmu->nr_arch_gp_counters);
+
+               return &pmu->gp_counters[index];
+       }
 
        return NULL;
 }
@@ -91,8 +97,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
 {
        int base = MSR_CORE_PERF_FIXED_CTR0;
 
-       if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
-               return &pmu->fixed_counters[msr - base];
+       if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
+               u32 index = array_index_nospec(msr - base,
+                                              pmu->nr_arch_fixed_counters);
+
+               return &pmu->fixed_counters[index];
+       }
 
        return NULL;
 }