KVM: selftests: Add a common helper for the PMU event filter guest code
author		Aaron Lewis <aaronlewis@google.com>
		Fri, 7 Apr 2023 23:32:49 +0000 (16:32 -0700)
committer	Sean Christopherson <seanjc@google.com>
		Fri, 14 Apr 2023 20:20:53 +0000 (13:20 -0700)
Split out the common parts of the Intel and AMD guest code in the PMU
event filter test into a helper function.  This is in preparation for
adding additional counters to the test.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
Link: https://lore.kernel.org/r/20230407233254.957013-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c

index 2feef25..13eca93 100644
@@ -100,6 +100,15 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
                GUEST_SYNC(0);
 }
 
+static uint64_t run_and_measure_loop(uint32_t msr_base)
+{
+       uint64_t branches_retired = rdmsr(msr_base + 0);
+
+       __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+       return rdmsr(msr_base + 0) - branches_retired;
+}
+
 static void intel_guest_code(void)
 {
        check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
@@ -108,16 +117,15 @@ static void intel_guest_code(void)
        GUEST_SYNC(1);
 
        for (;;) {
-               uint64_t br0, br1;
+               uint64_t count;
 
                wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
                wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
                      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
-               wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
-               br0 = rdmsr(MSR_IA32_PMC0);
-               __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-               br1 = rdmsr(MSR_IA32_PMC0);
-               GUEST_SYNC(br1 - br0);
+               wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
+
+               count = run_and_measure_loop(MSR_IA32_PMC0);
+               GUEST_SYNC(count);
        }
 }
 
@@ -133,15 +141,14 @@ static void amd_guest_code(void)
        GUEST_SYNC(1);
 
        for (;;) {
-               uint64_t br0, br1;
+               uint64_t count;
 
                wrmsr(MSR_K7_EVNTSEL0, 0);
                wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
                      ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
-               br0 = rdmsr(MSR_K7_PERFCTR0);
-               __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
-               br1 = rdmsr(MSR_K7_PERFCTR0);
-               GUEST_SYNC(br1 - br0);
+
+               count = run_and_measure_loop(MSR_K7_PERFCTR0);
+               GUEST_SYNC(count);
        }
 }
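
Because the new helper takes an MSR base and reads the counter at msr_base + 0,
the "additional counters" mentioned in the changelog can be folded in by also
sampling msr_base + 1 around the same measured loop (MSR_IA32_PMC1 and
MSR_K7_PERFCTR1 sit immediately after their respective base counters).  A rough
sketch of that direction follows; the struct, its field names, and the helper
name are illustrative assumptions, not part of this patch:

	/*
	 * Illustrative sketch only, not part of this patch: measure two
	 * counters (e.g. branches and instructions retired) around a single
	 * run of the loop by sampling msr_base + 0 and msr_base + 1.  The
	 * struct and field names here are hypothetical.
	 */
	struct loop_counts {
		uint64_t branches_retired;
		uint64_t instructions_retired;
	};

	static struct loop_counts run_and_measure_loop_two(uint32_t msr_base)
	{
		struct loop_counts counts;
		uint64_t br0 = rdmsr(msr_base + 0);
		uint64_t insn0 = rdmsr(msr_base + 1);

		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));

		counts.branches_retired = rdmsr(msr_base + 0) - br0;
		counts.instructions_retired = rdmsr(msr_base + 1) - insn0;
		return counts;
	}

The guest code would then program both event selectors before calling the
helper, keeping the Intel/AMD differences confined to the MSR names and event
encodings, exactly as with the single-counter helper added here.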