Message ID | 20210901003517.3953145-4-songliubraving@fb.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | bpf: introduce bpf_get_branch_snapshot |
On Tue, Aug 31, 2021 at 7:01 PM Song Liu <songliubraving@fb.com> wrote:
>
> This test uses bpf_get_branch_snapshot from a fexit program. The test uses
> a target function (bpf_testmod_loop_test) and compares the record against
> kallsyms. If there isn't enough record matching kallsyms, the test fails.
>
> Signed-off-by: Song Liu <songliubraving@fb.com>
> ---

LGTM, few minor nits below

Acked-by: Andrii Nakryiko <andrii@kernel.org>

> .../selftests/bpf/bpf_testmod/bpf_testmod.c | 14 ++-
> .../bpf/prog_tests/get_branch_snapshot.c | 101 ++++++++++++++++++
> .../selftests/bpf/progs/get_branch_snapshot.c | 44 ++++++++
> tools/testing/selftests/bpf/trace_helpers.c | 37 +++++++
> tools/testing/selftests/bpf/trace_helpers.h | 5 +
> 5 files changed, 200 insertions(+), 1 deletion(-)
> create mode 100644 tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
> create mode 100644 tools/testing/selftests/bpf/progs/get_branch_snapshot.c
>

[...]

> +
> +void test_get_branch_snapshot(void)
> +{
> +	struct get_branch_snapshot *skel = NULL;
> +	int err;
> +
> +	if (create_perf_events()) {
> +		test__skip(); /* system doesn't support LBR */
> +		goto cleanup;
> +	}
> +
> +	skel = get_branch_snapshot__open_and_load();
> +	if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
> +		goto cleanup;
> +
> +	err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
> +	if (!ASSERT_OK(err, "kallsyms_find"))
> +		goto cleanup;
> +
> +	err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high);
> +	if (!ASSERT_OK(err, "kallsyms_find_next"))
> +		goto cleanup;
> +
> +	err = get_branch_snapshot__attach(skel);
> +	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
> +		goto cleanup;
> +
> +	/* trigger the program */
> +	system("cat /sys/kernel/bpf_testmod > /dev/null 2>& 1");

ugh :( see prog_tests/module_attach.c, we can extract and reuse
trigger_module_test_read() and trigger_module_test_write()

> +
> +	if (skel->bss->total_entries < 16) {
> +		/* too few entries for the hit/waste test */
> +		test__skip();
> +		goto cleanup;
> +	}
> +

[...]

> +SEC("fexit/bpf_testmod_loop_test")
> +int BPF_PROG(test1, int n, int ret)
> +{
> +	long i;
> +
> +	total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
> +	total_entries /= sizeof(struct perf_branch_entry);
> +
> +	bpf_printk("total_entries %lu\n", total_entries);
> +
> +	for (i = 0; i < PERF_MAX_BRANCH_SNAPSHOT; i++) {
> +		if (i >= total_entries)
> +			break;
> +		if (in_range(entries[i].from) && in_range(entries[i].to))
> +			test1_hits++;
> +		else if (!test1_hits)
> +			wasted_entries++;
> +		bpf_printk("i %d from %llx to %llx", i, entries[i].from,
> +			   entries[i].to);

debug leftovers? this will be polluting trace_pipe unnecessarily; same
for above total_entries bpf_printk()

> +	}
> +	return 0;
> +}
> diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
> index e7a19b04d4eaf..5100a169b72b1 100644
> --- a/tools/testing/selftests/bpf/trace_helpers.c
> +++ b/tools/testing/selftests/bpf/trace_helpers.c
> @@ -1,4 +1,5 @@
>  // SPDX-License-Identifier: GPL-2.0
> +#include <ctype.h>
>  #include <stdio.h>
>  #include <stdlib.h>
>  #include <string.h>
> @@ -117,6 +118,42 @@ int kallsyms_find(const char *sym, unsigned long long *addr)
>  	return err;
>  }
>

[...]
> On Aug 31, 2021, at 9:08 PM, Andrii Nakryiko <andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Aug 31, 2021 at 7:01 PM Song Liu <songliubraving@fb.com> wrote:
>>
>> This test uses bpf_get_branch_snapshot from a fexit program. The test uses
>> a target function (bpf_testmod_loop_test) and compares the record against
>> kallsyms. If there isn't enough record matching kallsyms, the test fails.
>>
>> Signed-off-by: Song Liu <songliubraving@fb.com>
>> ---
>
> LGTM, few minor nits below
>
> Acked-by: Andrii Nakryiko <andrii@kernel.org>
>
>> .../selftests/bpf/bpf_testmod/bpf_testmod.c | 14 ++-
>> .../bpf/prog_tests/get_branch_snapshot.c | 101 ++++++++++++++++++
>> .../selftests/bpf/progs/get_branch_snapshot.c | 44 ++++++++
>> tools/testing/selftests/bpf/trace_helpers.c | 37 +++++++
>> tools/testing/selftests/bpf/trace_helpers.h | 5 +
>> 5 files changed, 200 insertions(+), 1 deletion(-)
>> create mode 100644 tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
>> create mode 100644 tools/testing/selftests/bpf/progs/get_branch_snapshot.c
>>
>
> [...]
>
>> +
>> +void test_get_branch_snapshot(void)
>> +{
>> +	struct get_branch_snapshot *skel = NULL;
>> +	int err;
>> +
>> +	if (create_perf_events()) {
>> +		test__skip(); /* system doesn't support LBR */
>> +		goto cleanup;
>> +	}
>> +
>> +	skel = get_branch_snapshot__open_and_load();
>> +	if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
>> +		goto cleanup;
>> +
>> +	err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
>> +	if (!ASSERT_OK(err, "kallsyms_find"))
>> +		goto cleanup;
>> +
>> +	err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high);
>> +	if (!ASSERT_OK(err, "kallsyms_find_next"))
>> +		goto cleanup;
>> +
>> +	err = get_branch_snapshot__attach(skel);
>> +	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
>> +		goto cleanup;
>> +
>> +	/* trigger the program */
>> +	system("cat /sys/kernel/bpf_testmod > /dev/null 2>& 1");
>
> ugh :( see prog_tests/module_attach.c, we can extract and reuse
> trigger_module_test_read() and trigger_module_test_write()

Will fix.

>
>> +
>> +	if (skel->bss->total_entries < 16) {
>> +		/* too few entries for the hit/waste test */
>> +		test__skip();
>> +		goto cleanup;
>> +	}
>> +
>
> [...]
>
>> +SEC("fexit/bpf_testmod_loop_test")
>> +int BPF_PROG(test1, int n, int ret)
>> +{
>> +	long i;
>> +
>> +	total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
>> +	total_entries /= sizeof(struct perf_branch_entry);
>> +
>> +	bpf_printk("total_entries %lu\n", total_entries);
>> +
>> +	for (i = 0; i < PERF_MAX_BRANCH_SNAPSHOT; i++) {
>> +		if (i >= total_entries)
>> +			break;
>> +		if (in_range(entries[i].from) && in_range(entries[i].to))
>> +			test1_hits++;
>> +		else if (!test1_hits)
>> +			wasted_entries++;
>> +		bpf_printk("i %d from %llx to %llx", i, entries[i].from,
>> +			   entries[i].to);
>
> debug leftovers? this will be polluting trace_pipe unnecessarily; same
> for above total_entries bpf_printk()

Oops.. I added/removed it for every version, but forgot this time. Will fix
in v5.

Thanks,
Song
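For context, the reusable trigger helper referenced above already lives in
prog_tests/module_attach.c; below is a minimal sketch of what the extracted,
shared version might look like. The helper name follows module_attach.c, but
the file it would move to and the exact error handling are assumptions here,
not necessarily what landed in v5.

/* Sketch of a shared helper that triggers bpf_testmod_test_read() (and with
 * it bpf_testmod_loop_test()) from user space, replacing the system("cat ...")
 * call in the test. Location and error-handling style are assumptions.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

#define BPF_TESTMOD_TEST_FILE "/sys/kernel/bpf_testmod"

static int trigger_module_test_read(int read_sz)
{
	int fd, err;

	fd = open(BPF_TESTMOD_TEST_FILE, O_RDONLY);
	err = -errno;
	if (fd < 0)
		return err;

	/* the read itself always fails with -EIO; we only care that it runs
	 * the kernel side and fires the fexit program
	 */
	read(fd, NULL, read_sz);
	close(fd);

	return 0;
}

In test_get_branch_snapshot(), the system() call would then become something
like ASSERT_OK(trigger_module_test_read(100), "trigger_read").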
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 141d8da687d21..19635e57ff21a 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -13,6 +13,18 @@
 
 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
 
+noinline int bpf_testmod_loop_test(int n)
+{
+	int i, sum = 0;
+
+	/* the primary goal of this test is to test LBR. Create a lot of
+	 * branches in the function, so we can catch it easily.
+	 */
+	for (i = 0; i < n; i++)
+		sum += i;
+	return sum;
+}
+
 noinline ssize_t
 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
 		      struct bin_attribute *bin_attr,
@@ -24,6 +36,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
 		.len = len,
 	};
 
+	bpf_testmod_loop_test(101);
 	trace_bpf_testmod_test_read(current, &ctx);
 
 	return -EIO; /* always fail */
@@ -71,4 +84,3 @@ module_exit(bpf_testmod_exit);
 MODULE_AUTHOR("Andrii Nakryiko");
 MODULE_DESCRIPTION("BPF selftests module");
 MODULE_LICENSE("Dual BSD/GPL");
-
diff --git a/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
new file mode 100644
index 0000000000000..03ffa5cdf9b09
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "get_branch_snapshot.skel.h"
+
+static int *pfd_array;
+static int cpu_cnt;
+
+static int create_perf_events(void)
+{
+	struct perf_event_attr attr = {0};
+	int cpu;
+
+	/* create perf event */
+	attr.size = sizeof(attr);
+	attr.type = PERF_TYPE_RAW;
+	attr.config = 0x1b00;
+	attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+	attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
+		PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
+
+	cpu_cnt = libbpf_num_possible_cpus();
+	pfd_array = malloc(sizeof(int) * cpu_cnt);
+	if (!pfd_array) {
+		cpu_cnt = 0;
+		return 1;
+	}
+
+	for (cpu = 0; cpu < cpu_cnt; cpu++) {
+		pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
+					 -1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
+		if (pfd_array[cpu] < 0)
+			break;
+	}
+
+	return cpu == 0;
+}
+
+static void close_perf_events(void)
+{
+	int cpu = 0;
+	int fd;
+
+	while (cpu++ < cpu_cnt) {
+		fd = pfd_array[cpu];
+		if (fd < 0)
+			break;
+		close(fd);
+	}
+	free(pfd_array);
+}
+
+void test_get_branch_snapshot(void)
+{
+	struct get_branch_snapshot *skel = NULL;
+	int err;
+
+	if (create_perf_events()) {
+		test__skip(); /* system doesn't support LBR */
+		goto cleanup;
+	}
+
+	skel = get_branch_snapshot__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
+		goto cleanup;
+
+	err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
+	if (!ASSERT_OK(err, "kallsyms_find"))
+		goto cleanup;
+
+	err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high);
+	if (!ASSERT_OK(err, "kallsyms_find_next"))
+		goto cleanup;
+
+	err = get_branch_snapshot__attach(skel);
+	if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
+		goto cleanup;
+
+	/* trigger the program */
+	system("cat /sys/kernel/bpf_testmod > /dev/null 2>& 1");
+
+	if (skel->bss->total_entries < 16) {
+		/* too few entries for the hit/waste test */
+		test__skip();
+		goto cleanup;
+	}
+
+	ASSERT_GT(skel->bss->test1_hits, 1, "find_looptest_in_lbr");
+
+	/* Given we stop LBR in software, we will waste a few entries.
+	 * But we should try to waste as few as possible entries. We are at
+	 * about 11 on x86_64 systems.
+	 * Add a check for < 15 so that we get heads-up when something
+	 * changes and wastes too many entries.
+	 */
+	ASSERT_LT(skel->bss->wasted_entries, 15, "check_wasted_entries");
+
+cleanup:
+	get_branch_snapshot__destroy(skel);
+	close_perf_events();
+}
diff --git a/tools/testing/selftests/bpf/progs/get_branch_snapshot.c b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c
new file mode 100644
index 0000000000000..24a6e7a9c08ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/get_branch_snapshot.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_hits = 0;
+__u64 address_low = 0;
+__u64 address_high = 0;
+int wasted_entries = 0;
+long total_entries = 0;
+
+struct perf_branch_entry entries[PERF_MAX_BRANCH_SNAPSHOT] = {};
+
+
+static inline bool in_range(__u64 val)
+{
+	return (val >= address_low) && (val < address_high);
+}
+
+SEC("fexit/bpf_testmod_loop_test")
+int BPF_PROG(test1, int n, int ret)
+{
+	long i;
+
+	total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
+	total_entries /= sizeof(struct perf_branch_entry);
+
+	bpf_printk("total_entries %lu\n", total_entries);
+
+	for (i = 0; i < PERF_MAX_BRANCH_SNAPSHOT; i++) {
+		if (i >= total_entries)
+			break;
+		if (in_range(entries[i].from) && in_range(entries[i].to))
+			test1_hits++;
+		else if (!test1_hits)
+			wasted_entries++;
+		bpf_printk("i %d from %llx to %llx", i, entries[i].from,
+			   entries[i].to);
+	}
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index e7a19b04d4eaf..5100a169b72b1 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -117,6 +118,42 @@ int kallsyms_find(const char *sym, unsigned long long *addr)
 	return err;
 }
 
+/* find the address of the next symbol of the same type, this can be used
+ * to determine the end of a function.
+ */
+int kallsyms_find_next(const char *sym, unsigned long long *addr)
+{
+	char type, found_type, name[500];
+	unsigned long long value;
+	bool found = false;
+	int err = 0;
+	FILE *f;
+
+	f = fopen("/proc/kallsyms", "r");
+	if (!f)
+		return -EINVAL;
+
+	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+		/* Different types of symbols in kernel modules are mixed
+		 * in /proc/kallsyms. Only return the next matching type.
+		 * Use tolower() for type so that 'T' matches 't'.
+		 */
+		if (found && found_type == tolower(type)) {
+			*addr = value;
+			goto out;
+		}
+		if (strcmp(name, sym) == 0) {
+			found = true;
+			found_type = tolower(type);
+		}
+	}
+	err = -ENOENT;
+
+out:
+	fclose(f);
+	return err;
+}
+
 void read_trace_pipe(void)
 {
 	int trace_fd;
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index d907b445524d5..bc8ed86105d94 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -16,6 +16,11 @@ long ksym_get_addr(const char *name);
 /* open kallsyms and find addresses on the fly, faster than load + search. */
 int kallsyms_find(const char *sym, unsigned long long *addr);
 
+/* find the address of the next symbol, this can be used to determine the
+ * end of a function
+ */
+int kallsyms_find_next(const char *sym, unsigned long long *addr);
+
 void read_trace_pipe(void);
 
 ssize_t get_uprobe_offset(const void *addr, ssize_t base);
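As a quick usage illustration of the two kallsyms helpers above: kallsyms_find()
returns the start address of a function and kallsyms_find_next() the start of
the following symbol, so together they bracket the function's [start, end)
range. That is exactly how the selftest decides whether an LBR entry hit
bpf_testmod_loop_test(). The caller below is hypothetical and not part of the
patch.

/* Hypothetical caller, for illustration only: bracket a kernel function's
 * address range using the helpers declared in trace_helpers.h.
 */
#include <stdio.h>
#include "trace_helpers.h"

static int print_func_range(const char *func)
{
	unsigned long long start, end;

	if (kallsyms_find(func, &start))	/* non-zero means not found */
		return -1;
	if (kallsyms_find_next(func, &end))
		return -1;

	/* an LBR entry with from/to inside [start, end) landed in func */
	printf("%s: [0x%llx, 0x%llx)\n", func, start, end);
	return 0;
}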
This test uses bpf_get_branch_snapshot from a fexit program attached to a
target function (bpf_testmod_loop_test) and compares the recorded branch
entries against the function's address range from kallsyms. If there aren't
enough records matching kallsyms, the test fails.

Signed-off-by: Song Liu <songliubraving@fb.com>
---
 .../selftests/bpf/bpf_testmod/bpf_testmod.c | 14 ++-
 .../bpf/prog_tests/get_branch_snapshot.c | 101 ++++++++++++++++++
 .../selftests/bpf/progs/get_branch_snapshot.c | 44 ++++++++
 tools/testing/selftests/bpf/trace_helpers.c | 37 +++++++
 tools/testing/selftests/bpf/trace_helpers.h | 5 +
 5 files changed, 200 insertions(+), 1 deletion(-)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
 create mode 100644 tools/testing/selftests/bpf/progs/get_branch_snapshot.c