Message ID | 20220127071532.384888-3-houtao1@huawei.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | bpf, arm64: enable kfunc call | expand |
On 1/27/22 8:15 AM, Hou Tao wrote: > In add_kfunc_call(), bpf_kfunc_desc->imm with type s32 is used to > represent the offset of called kfunc from __bpf_call_base, so > add a test to ensure that the offset will not be overflowed. > > Signed-off-by: Hou Tao <houtao1@huawei.com> Thanks for looking into this! > --- > .../selftests/bpf/prog_tests/ksyms_module.c | 72 +++++++++++++++++++ > 1 file changed, 72 insertions(+) > > diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c > index d490ad80eccb..ce0cd3446931 100644 > --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c > +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c > @@ -6,6 +6,76 @@ > #include "test_ksyms_module.lskel.h" > #include "test_ksyms_module.skel.h" > > +/* Most logic comes from bpf_object__read_kallsyms_file() */ > +static int test_find_func_in_kallsyms(const char *func, unsigned long *addr) > +{ > + /* Same as KSYM_NAME_LEN */ > + char sym_name[128]; > + char sym_type; > + unsigned long sym_addr; > + int ret, err; > + FILE *f; > + > + f = fopen("/proc/kallsyms", "r"); > + if (!f) > + return -errno; > + > + err = -ENOENT; > + while (true) { > + ret = fscanf(f, "%lx %c %127s%*[^\n]\n", > + &sym_addr, &sym_type, sym_name); > + if (ret == EOF && feof(f)) > + break; > + > + if (ret != 3) { > + err = -EINVAL; > + break; > + } > + > + if ((sym_type == 't' || sym_type == 'T') && > + !strcmp(sym_name, func)) { > + *addr = sym_addr; > + err = 0; > + break; > + } > + } > + > + fclose(f); > + return err; > +} Could we just reuse kallsyms_find() from trace_helpers.c which is also used in couple of other prog_tests already? > + > +/* > + * Check whether or not s32 in bpf_kfunc_desc is sufficient > + * to represent the offset between bpf_testmod_test_mod_kfunc > + * and __bpf_call_base. 
> + */ > +void test_ksyms_module_valid_offset(void) > +{ > + unsigned long kfunc_addr; > + unsigned long base_addr; > + int used_offset; > + long actual_offset; > + int err; > + > + if (!env.has_testmod) { > + test__skip(); > + return; > + } > + > + err = test_find_func_in_kallsyms("bpf_testmod_test_mod_kfunc", > + &kfunc_addr); > + if (!ASSERT_OK(err, "find kfunc addr")) > + return; > + > + err = test_find_func_in_kallsyms("__bpf_call_base", &base_addr); > + if (!ASSERT_OK(err, "find base addr")) > + return; > + > + used_offset = kfunc_addr - base_addr; > + actual_offset = kfunc_addr - base_addr; > + ASSERT_EQ((long)used_offset, actual_offset, "kfunc offset overflowed"); Is the above also executed in case bpf_jit_supports_kfunc_call() falls back to the default __weak callback, returning false? If yes, then the ASSERT_EQ() may fail on archs like s390, ppc, etc where the offset may not be enough. > +} > + > void test_ksyms_module_lskel(void) > { > struct test_ksyms_module_lskel *skel; > @@ -55,6 +125,8 @@ void test_ksyms_module_libbpf(void) > > void test_ksyms_module(void) > { > + if (test__start_subtest("valid_offset")) > + test_ksyms_module_valid_offset(); > if (test__start_subtest("lskel")) > test_ksyms_module_lskel(); > if (test__start_subtest("libbpf")) >
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c index d490ad80eccb..ce0cd3446931 100644 --- a/tools/testing/selftests/bpf/prog_tests/ksyms_module.c +++ b/tools/testing/selftests/bpf/prog_tests/ksyms_module.c @@ -6,6 +6,76 @@ #include "test_ksyms_module.lskel.h" #include "test_ksyms_module.skel.h" +/* Most logic comes from bpf_object__read_kallsyms_file() */ +static int test_find_func_in_kallsyms(const char *func, unsigned long *addr) +{ + /* Same as KSYM_NAME_LEN */ + char sym_name[128]; + char sym_type; + unsigned long sym_addr; + int ret, err; + FILE *f; + + f = fopen("/proc/kallsyms", "r"); + if (!f) + return -errno; + + err = -ENOENT; + while (true) { + ret = fscanf(f, "%lx %c %127s%*[^\n]\n", + &sym_addr, &sym_type, sym_name); + if (ret == EOF && feof(f)) + break; + + if (ret != 3) { + err = -EINVAL; + break; + } + + if ((sym_type == 't' || sym_type == 'T') && + !strcmp(sym_name, func)) { + *addr = sym_addr; + err = 0; + break; + } + } + + fclose(f); + return err; +} + +/* + * Check whether or not s32 in bpf_kfunc_desc is sufficient + * to represent the offset between bpf_testmod_test_mod_kfunc + * and __bpf_call_base. 
+ */ +void test_ksyms_module_valid_offset(void) +{ + unsigned long kfunc_addr; + unsigned long base_addr; + int used_offset; + long actual_offset; + int err; + + if (!env.has_testmod) { + test__skip(); + return; + } + + err = test_find_func_in_kallsyms("bpf_testmod_test_mod_kfunc", + &kfunc_addr); + if (!ASSERT_OK(err, "find kfunc addr")) + return; + + err = test_find_func_in_kallsyms("__bpf_call_base", &base_addr); + if (!ASSERT_OK(err, "find base addr")) + return; + + used_offset = kfunc_addr - base_addr; + actual_offset = kfunc_addr - base_addr; + ASSERT_EQ((long)used_offset, actual_offset, "kfunc offset overflowed"); +} + void test_ksyms_module_lskel(void) { struct test_ksyms_module_lskel *skel; @@ -55,6 +125,8 @@ void test_ksyms_module_libbpf(void) void test_ksyms_module(void) { + if (test__start_subtest("valid_offset")) + test_ksyms_module_valid_offset(); if (test__start_subtest("lskel")) test_ksyms_module_lskel(); if (test__start_subtest("libbpf"))
In add_kfunc_call(), bpf_kfunc_desc->imm with type s32 is used to represent the offset of the called kfunc from __bpf_call_base, so add a test to ensure that the offset will not overflow. Signed-off-by: Hou Tao <houtao1@huawei.com> --- .../selftests/bpf/prog_tests/ksyms_module.c | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+)