GUEST_SYNC(0);
}
+static uint64_t run_and_measure_loop(uint32_t msr_base)
+{
+ uint64_t branches_retired = rdmsr(msr_base + 0);
+
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+
+ return rdmsr(msr_base + 0) - branches_retired;
+}
+
/*
 * Intel guest: verify the PMU is usable, then loop forever reprogramming
 * PMC0 to count retired branches and reporting each measurement to the
 * host via GUEST_SYNC.  The host ucalls back in to drive each iteration.
 */
static void intel_guest_code(void)
{
	check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
	GUEST_SYNC(1);

	for (;;) {
		uint64_t count;

		/*
		 * Globally disable all counters while (re)programming
		 * PMC0 to count branches retired at CPL0, then enable
		 * only PMC0 (bit 0) before measuring.
		 */
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);

		count = run_and_measure_loop(MSR_IA32_PMC0);
		GUEST_SYNC(count);
	}
}
GUEST_SYNC(1);
for (;;) {
- uint64_t br0, br1;
+ uint64_t count;
wrmsr(MSR_K7_EVNTSEL0, 0);
wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
- br0 = rdmsr(MSR_K7_PERFCTR0);
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
- br1 = rdmsr(MSR_K7_PERFCTR0);
- GUEST_SYNC(br1 - br0);
+
+ count = run_and_measure_loop(MSR_K7_PERFCTR0);
+ GUEST_SYNC(count);
}
}