KVM: x86/pmu: Use enums instead of hardcoded magic for arch event indices
author      Sean Christopherson <seanjc@google.com>
            Wed, 7 Jun 2023 01:02:03 +0000 (18:02 -0700)
committer   Sean Christopherson <seanjc@google.com>
            Wed, 2 Aug 2023 23:44:36 +0000 (16:44 -0700)
Add "enum intel_pmu_architectural_events" to replace the magic numbers for
the (pseudo-)architectural events, and to give a meaningful name to each
event so that new readers don't need psychic powers to understand what the
code is doing.

Cc: Aaron Lewis <aaronlewis@google.com>
Cc: Like Xu <like.xu.linux@gmail.com>
Reviewed-by: Like Xu <likexu@tencent.com>
Link: https://lore.kernel.org/r/20230607010206.1425277-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/pmu_intel.c

diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 80c769c..1109cbd 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
 
 #define MSR_PMC_FULL_WIDTH_BIT      (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
 
+enum intel_pmu_architectural_events {
+       /*
+        * The order of the architectural events matters as support for each
+        * event is enumerated via CPUID using the index of the event.
+        */
+       INTEL_ARCH_CPU_CYCLES,
+       INTEL_ARCH_INSTRUCTIONS_RETIRED,
+       INTEL_ARCH_REFERENCE_CYCLES,
+       INTEL_ARCH_LLC_REFERENCES,
+       INTEL_ARCH_LLC_MISSES,
+       INTEL_ARCH_BRANCHES_RETIRED,
+       INTEL_ARCH_BRANCHES_MISPREDICTED,
+
+       NR_REAL_INTEL_ARCH_EVENTS,
+
+       /*
+        * Pseudo-architectural event used to implement IA32_FIXED_CTR2, a.k.a.
+        * TSC reference cycles.  The architectural reference cycles event may
+        * or may not actually use the TSC as the reference, e.g. might use the
+        * core crystal clock or the bus clock (yeah, "architectural").
+        */
+       PSEUDO_ARCH_REFERENCE_CYCLES = NR_REAL_INTEL_ARCH_EVENTS,
+       NR_INTEL_ARCH_EVENTS,
+};
+
 static struct {
        u8 eventsel;
        u8 unit_mask;
 } const intel_arch_events[] = {
-       [0] = { 0x3c, 0x00 },
-       [1] = { 0xc0, 0x00 },
-       [2] = { 0x3c, 0x01 },
-       [3] = { 0x2e, 0x4f },
-       [4] = { 0x2e, 0x41 },
-       [5] = { 0xc4, 0x00 },
-       [6] = { 0xc5, 0x00 },
-       /* The above index must match CPUID 0x0A.EBX bit vector */
-       [7] = { 0x00, 0x03 },
+       [INTEL_ARCH_CPU_CYCLES]                 = { 0x3c, 0x00 },
+       [INTEL_ARCH_INSTRUCTIONS_RETIRED]       = { 0xc0, 0x00 },
+       [INTEL_ARCH_REFERENCE_CYCLES]           = { 0x3c, 0x01 },
+       [INTEL_ARCH_LLC_REFERENCES]             = { 0x2e, 0x4f },
+       [INTEL_ARCH_LLC_MISSES]                 = { 0x2e, 0x41 },
+       [INTEL_ARCH_BRANCHES_RETIRED]           = { 0xc4, 0x00 },
+       [INTEL_ARCH_BRANCHES_MISPREDICTED]      = { 0xc5, 0x00 },
+       [PSEUDO_ARCH_REFERENCE_CYCLES]          = { 0x00, 0x03 },
 };
 
 /* mapping between fixed pmc index and intel_arch_events array */
-static int fixed_pmc_events[] = {1, 0, 7};
+static int fixed_pmc_events[] = {
+       [0] = INTEL_ARCH_INSTRUCTIONS_RETIRED,
+       [1] = INTEL_ARCH_CPU_CYCLES,
+       [2] = PSEUDO_ARCH_REFERENCE_CYCLES,
+};
 
 static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
@@ -80,13 +108,16 @@ static bool intel_hw_event_available(struct kvm_pmc *pmc)
        u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+       BUILD_BUG_ON(ARRAY_SIZE(intel_arch_events) != NR_INTEL_ARCH_EVENTS);
+
+       for (i = 0; i < NR_INTEL_ARCH_EVENTS; i++) {
                if (intel_arch_events[i].eventsel != event_select ||
                    intel_arch_events[i].unit_mask != unit_mask)
                        continue;
 
                /* disable event that reported as not present by cpuid */
-               if ((i < 7) && !(pmu->available_event_types & (1 << i)))
+               if ((i < PSEUDO_ARCH_REFERENCE_CYCLES) &&
+                   !(pmu->available_event_types & (1 << i)))
                        return false;
 
                break;
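
For context (not part of the patch): the comment above the enum explains that the order matters because the architectural events are enumerated via CPUID leaf 0xA, where EBX is a bit vector in which a set bit means the corresponding event is *not* available. KVM stores the inverted mask in pmu->available_event_types (set bit = available), which is why intel_hw_event_available() bails out when the bit is clear. The following is a minimal userspace sketch of that index-to-bit mapping; the helper arch_event_available() and the use of __get_cpuid() are illustrative assumptions, not KVM code.

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the enum added by the patch; indices line up with CPUID 0xA.EBX bits. */
enum intel_pmu_architectural_events {
        INTEL_ARCH_CPU_CYCLES,
        INTEL_ARCH_INSTRUCTIONS_RETIRED,
        INTEL_ARCH_REFERENCE_CYCLES,
        INTEL_ARCH_LLC_REFERENCES,
        INTEL_ARCH_LLC_MISSES,
        INTEL_ARCH_BRANCHES_RETIRED,
        INTEL_ARCH_BRANCHES_MISPREDICTED,
        NR_REAL_INTEL_ARCH_EVENTS,
};

/*
 * Illustrative helper (hypothetical, not in KVM): a set bit in CPUID 0xA.EBX
 * means the event is NOT available, so invert the bit to get "available".
 * For brevity this ignores EAX[31:24], which reports how many event bits are
 * actually enumerated; KVM masks available_event_types with that length.
 */
static bool arch_event_available(enum intel_pmu_architectural_events idx)
{
        unsigned int eax, ebx, ecx, edx;

        if (idx >= NR_REAL_INTEL_ARCH_EVENTS)
                return false;

        if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
                return false;

        return !(ebx & (1u << idx));
}

int main(void)
{
        printf("instructions retired available: %d\n",
               arch_event_available(INTEL_ARCH_INSTRUCTIONS_RETIRED));
        return 0;
}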