@@ -18,25 +18,36 @@
#define EXPECTED_INSTR 17
#define EXPECTED_BRNCH 5
-
-/* Enable GLOBAL_CTRL + disable GLOBAL_CTRL + clflush/mfence instructions */
-#define EXTRA_INSTRNS (3 + 3 + 2)
+#define IBPB_JMP_INSTRNS 7
+#define IBPB_JMP_BRANCHES 1
+#define IBPB_JMP_ASM(_wrmsr) \
+ "mov $1, %%eax; xor %%edx, %%edx;\n\t" \
+ "mov $73, %%ecx;\n\t" \
+ _wrmsr "\n\t" \
+ "lea 2f, %%rax;\n\t" \
+ "jmp *%%rax;\n\t" \
+ "nop;\n\t" \
+ "2: nop;\n\t"
+
+/* GLOBAL_CTRL enable + disable + clflush/mfence + IBPB_JMP */
+#define EXTRA_INSTRNS (3 + 3 + 2 + IBPB_JMP_INSTRNS)
#define LOOP_INSTRNS (N * 10 + EXTRA_INSTRNS)
-#define LOOP_BRANCHES (N)
-#define LOOP_ASM(_wrmsr, _clflush) \
- _wrmsr "\n\t" \
+#define LOOP_BRANCHES (N + IBPB_JMP_BRANCHES)
+#define LOOP_ASM(_wrmsr1, _clflush, _wrmsr2) \
+ _wrmsr1 "\n\t" \
"mov %%ecx, %%edi; mov %%ebx, %%ecx;\n\t" \
_clflush "\n\t" \
"mfence;\n\t" \
"1: mov (%1), %2; add $64, %1;\n\t" \
"nop; nop; nop; nop; nop; nop; nop;\n\t" \
"loop 1b;\n\t" \
+ IBPB_JMP_ASM(_wrmsr2) \
"mov %%edi, %%ecx; xor %%eax, %%eax; xor %%edx, %%edx;\n\t" \
- _wrmsr "\n\t"
+ _wrmsr1 "\n\t"
-#define _loop_asm(_wrmsr, _clflush) \
+#define _loop_asm(_wrmsr1, _clflush, _wrmsr2) \
do { \
- asm volatile(LOOP_ASM(_wrmsr, _clflush) \
+ asm volatile(LOOP_ASM(_wrmsr1, _clflush, _wrmsr2) \
: "=b"(tmp), "=r"(tmp2), "=r"(tmp3) \
: "a"(eax), "d"(edx), "c"(global_ctl), \
"0"(N), "1"(buf) \
@@ -99,6 +110,12 @@ char *buf;
static struct pmu_event *gp_events;
static unsigned int gp_events_size;
+static int has_ibpb(void)
+{
+ return this_cpu_has(X86_FEATURE_SPEC_CTRL) ||
+ this_cpu_has(X86_FEATURE_AMD_IBPB);
+}
+
static inline void __loop(void)
{
unsigned long tmp, tmp2, tmp3;
@@ -106,10 +123,14 @@ static inline void __loop(void)
u32 eax = 0;
u32 edx = 0;
- if (this_cpu_has(X86_FEATURE_CLFLUSH))
- _loop_asm("nop", "clflush (%1)");
+ if (this_cpu_has(X86_FEATURE_CLFLUSH) && has_ibpb())
+ _loop_asm("nop", "clflush (%1)", "wrmsr");
+ else if (this_cpu_has(X86_FEATURE_CLFLUSH))
+ _loop_asm("nop", "clflush (%1)", "nop");
+ else if (has_ibpb())
+ _loop_asm("nop", "nop", "wrmsr");
else
- _loop_asm("nop", "nop");
+ _loop_asm("nop", "nop", "nop");
}
/*
@@ -126,10 +147,14 @@ static inline void __precise_loop(u64 cntrs)
u32 eax = cntrs & (BIT_ULL(32) - 1);
u32 edx = cntrs >> 32;
- if (this_cpu_has(X86_FEATURE_CLFLUSH))
- _loop_asm("wrmsr", "clflush (%1)");
+ if (this_cpu_has(X86_FEATURE_CLFLUSH) && has_ibpb())
+ _loop_asm("wrmsr", "clflush (%1)", "wrmsr");
+ else if (this_cpu_has(X86_FEATURE_CLFLUSH))
+ _loop_asm("wrmsr", "clflush (%1)", "nop");
+ else if (has_ibpb())
+ _loop_asm("wrmsr", "nop", "wrmsr");
else
- _loop_asm("wrmsr", "nop");
+ _loop_asm("wrmsr", "nop", "nop");
}
static inline void loop(u64 cntrs)
Currently the lower boundary of branch misses event is set to 0. Strictly speaking 0 shouldn't be a valid count since it can't tell us if branch misses event counter works correctly or even disabled. Whereas it's also possible and reasonable that branch misses event count is 0 especailly for such simple loop() program with advanced branch predictor. To eliminate such ambiguity and make branch misses event verification more acccurately, an extra IBPB indirect jump asm blob is appended and IBPB command is leveraged to clear the branch target buffer and force to cause a branch miss for the indirect jump. Suggested-by: Jim Mattson <jmattson@google.com> Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com> --- x86/pmu.c | 55 ++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 40 insertions(+), 15 deletions(-)