@@ -100,6 +100,17 @@ static void check_msr(uint32_t msr, uint64_t bits_to_flip)
GUEST_SYNC(0);
}
+static uint64_t test_guest(uint32_t msr_base)
+{
+ uint64_t br0, br1;
+
+ br0 = rdmsr(msr_base + 0);
+ __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+ br1 = rdmsr(msr_base + 0);
+
+ return br1 - br0;
+}
+
static void intel_guest_code(void)
{
check_msr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
@@ -108,16 +119,15 @@ static void intel_guest_code(void)
GUEST_SYNC(1);
for (;;) {
- uint64_t br0, br1;
+ uint64_t count;
wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
- wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 1);
- br0 = rdmsr(MSR_IA32_PMC0);
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
- br1 = rdmsr(MSR_IA32_PMC0);
- GUEST_SYNC(br1 - br0);
+ wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);
+
+ count = test_guest(MSR_IA32_PMC0);
+ GUEST_SYNC(count);
}
}
@@ -133,15 +143,14 @@ static void amd_guest_code(void)
GUEST_SYNC(1);
for (;;) {
- uint64_t br0, br1;
+ uint64_t count;
wrmsr(MSR_K7_EVNTSEL0, 0);
wrmsr(MSR_K7_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
ARCH_PERFMON_EVENTSEL_OS | AMD_ZEN_BR_RETIRED);
- br0 = rdmsr(MSR_K7_PERFCTR0);
- __asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
- br1 = rdmsr(MSR_K7_PERFCTR0);
- GUEST_SYNC(br1 - br0);
+
+ count = test_guest(MSR_K7_PERFCTR0);
+ GUEST_SYNC(count);
}
}
Split out the common parts of the Intel and AMD guest code into a
helper function.  This is in preparation for adding additional
counters to the test.

No functional changes intended.

Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 .../kvm/x86_64/pmu_event_filter_test.c        | 31 ++++++++++++-------
 1 file changed, 20 insertions(+), 11 deletions(-)
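
For reference, a sketch of how the Intel guest loop reads once this
refactor is applied, reconstructed from the hunks above (not a literal
excerpt of the resulting file).  The AMD loop is the same shape, but
programs MSR_K7_EVNTSEL0 with AMD_ZEN_BR_RETIRED, passes
MSR_K7_PERFCTR0 to the helper, and has no global-control MSR writes.
wrmsr()/rdmsr()/GUEST_SYNC() and the MSR and event #defines come from
the selftest headers and the unchanged parts of the test, which are
not shown here:

	for (;;) {
		uint64_t count;

		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
		wrmsr(MSR_P6_EVNTSEL0, ARCH_PERFMON_EVENTSEL_ENABLE |
		      ARCH_PERFMON_EVENTSEL_OS | INTEL_BR_RETIRED);
		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0x1);

		/*
		 * test_guest() reads the counter, retires NUM_BRANCHES
		 * branches via "loop .", reads the counter again, and
		 * returns the delta.
		 */
		count = test_guest(MSR_IA32_PMC0);
		GUEST_SYNC(count);
	}

Keeping the vendor-specific event programming in the callers while
sharing only the rdmsr/loop/rdmsr measurement is what lets later
patches add more counters; the helper taking an MSR base (and reading
"msr_base + 0") presumably leaves room for indexing additional
counters off that base.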