diff mbox series

[RFC,bpf-next,3/6] selftests/test: test gen_prologue and gen_epilogue

Message ID 20240813184943.3759630-4-martin.lau@linux.dev (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series bpf: Add gen_epilogue and allow kfunc call in pro/epilogue | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 42 this patch: 42
netdev/build_tools success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 16 maintainers not CCed: sdf@fomichev.me eddyz87@gmail.com haoluo@google.com shuah@kernel.org linux-kselftest@vger.kernel.org song@kernel.org alexandre.torgue@foss.st.com linux-stm32@st-md-mailman.stormreply.com mcoquelin.stm32@gmail.com john.fastabend@gmail.com mykolal@fb.com linux-arm-kernel@lists.infradead.org jolsa@kernel.org thinker.li@gmail.com jrife@google.com kpsingh@kernel.org
netdev/build_clang success Errors and warnings before: 44 this patch: 44
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 49 this patch: 49
netdev/checkpatch warning WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 92 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Martin KaFai Lau Aug. 13, 2024, 6:49 p.m. UTC
From: Martin KaFai Lau <martin.lau@kernel.org>

This test adds a new struct_ops "bpf_testmod_st_ops" in bpf_testmod.
The ops of the bpf_testmod_st_ops is triggered by new kfunc calls
"bpf_kfunc_st_ops_test_*logue". These new kfunc calls are
primarily used by the SEC("syscall") program. The test triggering
sequence is like:
    SEC("syscall")
    syscall_prologue(struct st_ops_args *args)
        bpf_kfunc_st_ops_test_prologue(args)
	    st_ops->test_prologue(args)

The .gen_prologue and .gen_epilogue of the bpf_testmod_st_ops
will add PROLOGUE_A (1000) and EPILOGUE_A (10000) to args->a.
.gen_prologue adds PROLOGUE_A (1000).
.gen_epilogue adds EPILOGUE_A (10000).
.gen_epilogue will also set the r0 to 2 * args->a.

The .gen_prologue and .gen_epilogue of the bpf_testmod_st_ops
will test the prog->aux->attach_func_name to decide if
it needs to generate codes.

The main programs of the SEC("struct_ops/..") will either
call a global subprog() which does "args->a += 1" or
call another new kfunc bpf_kfunc_st_ops_inc10 which does "args->a += 10".

The prog_tests/struct_ops_syscall.c will test_run the SEC("syscall")
programs. It checks the result by testing the args->a and the retval.

For example, when triggering the ops in the
'SEC("struct_ops/test_epilogue") int BPF_PROG(test_epilogue_subprog..'

the expected args->a is
+1 (because of the subprog calls) + 10000 (.gen_epilogue) = 10001
The expected return value is 2 * 10001 (.gen_epilogue).

Another set of tests is to have the main struct_ops program
to call a subprog and call the inc10 kfunc.

There is also a bpf_tail_call test for epilogue.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
---
 .../selftests/bpf/bpf_testmod/bpf_testmod.c   | 190 ++++++++++++++++++
 .../selftests/bpf/bpf_testmod/bpf_testmod.h   |  11 +
 .../bpf/bpf_testmod/bpf_testmod_kfunc.h       |   6 +
 .../bpf/prog_tests/struct_ops_syscall.c       |  91 +++++++++
 .../selftests/bpf/progs/struct_ops_syscall.c  | 113 +++++++++++
 5 files changed, 411 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/struct_ops_syscall.c
 create mode 100644 tools/testing/selftests/bpf/progs/struct_ops_syscall.c

Comments

Eduard Zingerman Aug. 14, 2024, 8:48 p.m. UTC | #1
Hi Martin,

Please note that after changes for struct_ops map autoload by libbpf,
test_loader could be used to test struct_ops related changes.
Also, test_loader now supports __xlated macro which allows to verify
rewrites applied by verifier.
For example, the sample below works:

    struct st_ops_args;
    
    struct bpf_testmod_st_ops {
    	int (*test_prologue)(struct st_ops_args *args);
    	int (*test_epilogue)(struct st_ops_args *args);
    	int (*test_pro_epilogue)(struct st_ops_args *args);
    	struct module *owner;
    };
    
    __success
    __xlated("0: *(u64 *)(r10 -8) = r1")
    __xlated("1: r0 = 0")
    __xlated("2: r1 = *(u64 *)(r10 -8)")
    __xlated("3: r1 = *(u64 *)(r1 +0)")
    __xlated("4: r6 = *(u32 *)(r1 +0)")
    __xlated("5: w6 += 10000")
    __xlated("6: *(u32 *)(r1 +0) = r6")
    __xlated("7: r6 = r1")
    __xlated("8: call kernel-function")
    __xlated("9: r1 = r6")
    __xlated("10: call kernel-function")
    __xlated("11: w0 *= 2")
    __xlated("12: exit")
    SEC("struct_ops/test_epilogue")
    __naked int test_epilogue(void)
    {
    	asm volatile (
    	"r0 = 0;"
    	"exit;"
    	::: __clobber_all);
    }
    
    SEC(".struct_ops.link")
    struct bpf_testmod_st_ops st_ops = {
    	.test_epilogue = (void *)test_epilogue,
    };

(Complete example is in the attachment).
test_loader based tests can also trigger program execution via __retval() macro.
The only (minor) shortcoming that I see, is that test_loader would
load/unload st_ops map multiple times because of the following
interaction:
- test_loader assumes that each bpf program defines a test;
- test_loader re-creates all maps before each test;
- libbpf struct_ops autocreate logic marks all programs referenced
  from struct_ops map as autoloaded.

I think that writing tests this way is easier to follow,
compared to arithmetic manipulations done currently.
What do you think?

Thanks,
Eduard
Martin KaFai Lau Aug. 15, 2024, 11:41 p.m. UTC | #2
On 8/14/24 1:48 PM, Eduard Zingerman wrote:
> Hi Martin,
> 
> Please note that after changes for struct_ops map autoload by libbpf,
> test_loader could be used to test struct_ops related changes.
> Also, test_loader now supports __xlated macro which allows to verify
> rewrites applied by verifier.
> For example, the sample below works:
> 
>      struct st_ops_args;
>      
>      struct bpf_testmod_st_ops {
>      	int (*test_prologue)(struct st_ops_args *args);
>      	int (*test_epilogue)(struct st_ops_args *args);
>      	int (*test_pro_epilogue)(struct st_ops_args *args);
>      	struct module *owner;
>      };
>      
>      __success
>      __xlated("0: *(u64 *)(r10 -8) = r1")
>      __xlated("1: r0 = 0")
>      __xlated("2: r1 = *(u64 *)(r10 -8)")
>      __xlated("3: r1 = *(u64 *)(r1 +0)")
>      __xlated("4: r6 = *(u32 *)(r1 +0)")
>      __xlated("5: w6 += 10000")
>      __xlated("6: *(u32 *)(r1 +0) = r6")
>      __xlated("7: r6 = r1")
>      __xlated("8: call kernel-function")
>      __xlated("9: r1 = r6")
>      __xlated("10: call kernel-function")
>      __xlated("11: w0 *= 2")
>      __xlated("12: exit")

It is appealing to be able to check at the xlated instruction level for 
.gen_pro/epilogue.

>      SEC("struct_ops/test_epilogue")
>      __naked int test_epilogue(void)
>      {
>      	asm volatile (
>      	"r0 = 0;"

I also want to test a struct_ops prog making kfunc call, e.g. the 
BPF_PROG(test_epilogue_kfunc) in this patch. I have never tried this in asm, so 
a n00b question. Do you know if there is an example how to call kfunc?

>      	"exit;"
>      	::: __clobber_all);
>      }
>      
>      SEC(".struct_ops.link")
>      struct bpf_testmod_st_ops st_ops = {
>      	.test_epilogue = (void *)test_epilogue,
>      };
> 
> (Complete example is in the attachment).
> test_loader based tests can also trigger program execution via __retval() macro.
> The only (minor) shortcoming that I see, is that test_loader would
> load/unload st_ops map multiple times because of the following
> interaction:
> - test_loader assumes that each bpf program defines a test;
> - test_loader re-creates all maps before each test;
> - libbpf struct_ops autocreate logic marks all programs referenced
>    from struct_ops map as autoloaded.

If I understand correctly, there is redundant work, but it still works?

Potentially the test_loader can check all the loaded struct_ops progs of a 
st_ops map at once which is an optimization.

Re: __retval(), the struct_ops progs is triggered by a SEC("syscall") prog. 
Before calling this syscall prog, the st_ops map needs to be attached first. I 
think the attach part is missing also? or there is a way?
Eduard Zingerman Aug. 16, 2024, 12:23 a.m. UTC | #3
On Thu, 2024-08-15 at 16:41 -0700, Martin KaFai Lau wrote:

[...]

> >      SEC("struct_ops/test_epilogue")
> >      __naked int test_epilogue(void)
> >      {
> >      	asm volatile (
> >      	"r0 = 0;"
> 
> I also want to test a struct_ops prog making kfunc call, e.g. the 
> BPF_PROG(test_epilogue_kfunc) in this patch. I have never tried this in asm, so 
> a n00b question. Do you know if there is an example how to call kfunc?

Here is an example:
progs/verifier_ref_tracking.c, specifically take a look at
acquire_release_user_key_reference(). The main trick is to have
__kfunc_btf_root() with dummy calls, so that there are BTF signatures
for kfuncs included in the object file.

> >      	"exit;"
> >      	::: __clobber_all);
> >      }
> >      
> >      SEC(".struct_ops.link")
> >      struct bpf_testmod_st_ops st_ops = {
> >      	.test_epilogue = (void *)test_epilogue,
> >      };
> > 
> > (Complete example is in the attachment).
> > test_loader based tests can also trigger program execution via __retval() macro.
> > The only (minor) shortcoming that I see, is that test_loader would
> > load/unload st_ops map multiple times because of the following
> > interaction:
> > - test_loader assumes that each bpf program defines a test;
> > - test_loader re-creates all maps before each test;
> > - libbpf struct_ops autocreate logic marks all programs referenced
> >    from struct_ops map as autoloaded.
> 
> If I understand correctly, there is redundant work, but it still works?

Yes.

> Potentially the test_loader can check all the loaded struct_ops progs of a 
> st_ops map at once which is an optimization.

Yes, I should look into this.

> Re: __retval(), the struct_ops progs is triggered by a SEC("syscall") prog. 
> Before calling this syscall prog, the st_ops map needs to be attached first. I 
> think the attach part is missing also? or there is a way?

I think libbpf handles the attachment automatically, I'll double check and reply.
Eduard Zingerman Aug. 16, 2024, 1:50 a.m. UTC | #4
On Thu, 2024-08-15 at 17:23 -0700, Eduard Zingerman wrote:

[...]

> > Re: __retval(), the struct_ops progs is triggered by a SEC("syscall") prog. 
> > Before calling this syscall prog, the st_ops map needs to be attached first. I 
> > think the attach part is missing also? or there is a way?
> 
> I think libbpf handles the attachment automatically, I'll double check and reply.
> 

In theory, the following addition to the example I've sent already should work:

    struct st_ops_args;
    int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym;
 
    SEC("syscall")
    __retval(0)
    int syscall_prologue(void *ctx)
    {
    	struct st_ops_args args = { -42 };
    	bpf_kfunc_st_ops_test_prologue(&args);
    	return args.a;
    }

However, the initial value of -42 is not changed, e.g. here is the log:

    $ ./test_progs -vvv -t struct_ops_epilogue/syscall_prologue
    ...
    libbpf: loaded kernel BTF from '/sys/kernel/btf/vmlinux'
    libbpf: extern (func ksym) 'bpf_kfunc_st_ops_test_prologue': resolved to bpf_testmod [104486]
    libbpf: struct_ops init_kern st_ops: type_id:44 kern_type_id:104321 kern_vtype_id:104378
    libbpf: struct_ops init_kern st_ops: func ptr test_prologue is set to prog test_prologue from data(+0) to kern_data(+0)
    libbpf: struct_ops init_kern st_ops: func ptr test_epilogue is set to prog test_epilogue from data(+8) to kern_data(+8)
    libbpf: map 'st_ops': created successfully, fd=5
    run_subtest:PASS:unexpected_load_failure 0 nsec
    VERIFIER LOG:
    =============
    ...
    =============
    do_prog_test_run:PASS:bpf_prog_test_run 0 nsec
    run_subtest:FAIL:837 Unexpected retval: -42 != 0
    #321/3   struct_ops_epilogue/syscall_prologue:FAIL
    #321     struct_ops_epilogue:FAIL

So, something goes awry in bpf_kfunc_st_ops_test_prologue():

    __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
    {
    	int ret = -1;
    
    	mutex_lock(&st_ops_mutex);
    	if (st_ops && st_ops->test_prologue)
    		ret = st_ops->test_prologue(args);
    	mutex_unlock(&st_ops_mutex);
    
    	return ret;
    }

Either st_ops is null or st_ops->test_prologue is null.
However, the log above shows:

    libbpf: struct_ops init_kern st_ops: type_id:44 kern_type_id:104321 kern_vtype_id:104378
    libbpf: struct_ops init_kern st_ops: func ptr test_prologue is set to prog test_prologue from data(+0) to kern_data(+0)
    libbpf: struct_ops init_kern st_ops: func ptr test_epilogue is set to prog test_epilogue from data(+8) to kern_data(+8)

Here libbpf does autoload for st_ops map and populates it, so st_ops->test_prologue should not be null.
Will have some time tomorrow to debug this (or you can give it a shot if you'd like).
Martin KaFai Lau Aug. 16, 2024, 5:27 p.m. UTC | #5
On 8/15/24 6:50 PM, Eduard Zingerman wrote:
> On Thu, 2024-08-15 at 17:23 -0700, Eduard Zingerman wrote:
> 
> [...]
> 
>>> Re: __retval(), the struct_ops progs is triggered by a SEC("syscall") prog.
>>> Before calling this syscall prog, the st_ops map needs to be attached first. I
>>> think the attach part is missing also? or there is a way?
>>
>> I think libbpf handles the attachment automatically, I'll double check and reply.
>>
> 
> In theory, the following addition to the example I've sent already should work:
> 
>      struct st_ops_args;
>      int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym;
>   
>      SEC("syscall")
>      __retval(0)
>      int syscall_prologue(void *ctx)
>      {
>      	struct st_ops_args args = { -42 };
>      	bpf_kfunc_st_ops_test_prologue(&args);
>      	return args.a;
>      }
> 
> However, the initial value of -42 is not changed, e.g. here is the log:
> 
>      $ ./test_progs -vvv -t struct_ops_epilogue/syscall_prologue
>      ...
>      libbpf: loaded kernel BTF from '/sys/kernel/btf/vmlinux'
>      libbpf: extern (func ksym) 'bpf_kfunc_st_ops_test_prologue': resolved to bpf_testmod [104486]
>      libbpf: struct_ops init_kern st_ops: type_id:44 kern_type_id:104321 kern_vtype_id:104378
>      libbpf: struct_ops init_kern st_ops: func ptr test_prologue is set to prog test_prologue from data(+0) to kern_data(+0)
>      libbpf: struct_ops init_kern st_ops: func ptr test_epilogue is set to prog test_epilogue from data(+8) to kern_data(+8)
>      libbpf: map 'st_ops': created successfully, fd=5
>      run_subtest:PASS:unexpected_load_failure 0 nsec
>      VERIFIER LOG:
>      =============
>      ...
>      =============
>      do_prog_test_run:PASS:bpf_prog_test_run 0 nsec
>      run_subtest:FAIL:837 Unexpected retval: -42 != 0
>      #321/3   struct_ops_epilogue/syscall_prologue:FAIL
>      #321     struct_ops_epilogue:FAIL
> 
> So, something goes awry in bpf_kfunc_st_ops_test_prologue():
> 
>      __bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
>      {
>      	int ret = -1;
>      
>      	mutex_lock(&st_ops_mutex);
>      	if (st_ops && st_ops->test_prologue)

Thanks for checking!

I think the bpf_map__attach_struct_ops() is not done such that st_ops is NULL.

It probably needs another tag in the SEC("syscall") program to tell which st_ops 
map should be attached first before executing the "syscall" program.

I like the idea of using the __xlated macro to check the patched prologue, ctx 
pointer saving, and epilogue. I will add this test in the respin. I will keep 
the current way in this patch to exercise syscall and the ops/func in st_ops for 
now. We can iterate on it later and use it as an example on what supports are 
needed on the test_loader side for st_ops map testing. On the repetitive-enough 
to worth test_loader refactoring side, I suspect some of the existing st_ops 
load-success/load-failure tests may be worth to look at also. Thoughts?

>      		ret = st_ops->test_prologue(args);
>      	mutex_unlock(&st_ops_mutex);
>      
>      	return ret;
>      }
> 
> Either st_ops is null or st_ops->test_prologue is null.
> However, the log above shows:
> 
>      libbpf: struct_ops init_kern st_ops: type_id:44 kern_type_id:104321 kern_vtype_id:104378
>      libbpf: struct_ops init_kern st_ops: func ptr test_prologue is set to prog test_prologue from data(+0) to kern_data(+0)
>      libbpf: struct_ops init_kern st_ops: func ptr test_epilogue is set to prog test_epilogue from data(+8) to kern_data(+8)
> 
> Here libbpf does autoload for st_ops map and populates it, so st_ops->test_prologue should not be null.
> Will have some time tomorrow to debug this (or you can give it a shot if you'd like).
>
Eduard Zingerman Aug. 16, 2024, 8:27 p.m. UTC | #6
On Fri, 2024-08-16 at 10:27 -0700, Martin KaFai Lau wrote:

[...]

> Thanks for checking!
> 
> I think the bpf_map__attach_struct_ops() is not done such that st_ops is NULL.
> 
> It probably needs another tag in the SEC("syscall") program to tell which st_ops 
> map should be attached first before executing the "syscall" program.
> 
> I like the idea of using the __xlated macro to check the patched prologue, ctx 
> pointer saving, and epilogue. I will add this test in the respin. I will keep 
> the current way in this patch to exercise syscall and the ops/func in st_ops for 
> now. We can iterate on it later and use it as an example on what supports are 
> needed on the test_loader side for st_ops map testing. On the repetitive-enough 
> to worth test_loader refactoring side, I suspect some of the existing st_ops 
> load-success/load-failure tests may be worth to look at also. Thoughts?

You are correct, this happens because bpf_map__attach_struct_ops() is
not called. Fortunately, the change for test_loader.c is not very big.
Please check two patches in the attachment.

> I suspect some of the existing st_ops load-success/load-failure
> tests may be worth to look at also.

I suspect this is the case, but would prefer not worry about it for now :)
Martin KaFai Lau Aug. 19, 2024, 10:30 p.m. UTC | #7
On 8/16/24 1:27 PM, Eduard Zingerman wrote:
> On Fri, 2024-08-16 at 10:27 -0700, Martin KaFai Lau wrote:
> 
> [...]
> 
>> Thanks for checking!
>>
>> I think the bpf_map__attach_struct_ops() is not done such that st_ops is NULL.
>>
>> It probably needs another tag in the SEC("syscall") program to tell which st_ops
>> map should be attached first before executing the "syscall" program.
>>
>> I like the idea of using the __xlated macro to check the patched prologue, ctx
>> pointer saving, and epilogue. I will add this test in the respin. I will keep
>> the current way in this patch to exercise syscall and the ops/func in st_ops for
>> now. We can iterate on it later and use it as an example on what supports are
>> needed on the test_loader side for st_ops map testing. On the repetitive-enough
>> to worth test_loader refactoring side, I suspect some of the existing st_ops
>> load-success/load-failure tests may be worth to look at also. Thoughts?
> 
> You are correct, this happens because bpf_map__attach_struct_ops() is
> not called. Fortunately, the change for test_loader.c is not very big.
> Please check two patches in the attachment.

The patch looks good. I tried and it works. I will add it in the next respin.
That will help to cover the __xlated check on the instructions generated by 
gen_pro/epilogue and also check the syscall return value for the common case.

Except the tail_call test which needs to load a struct_ops program that does 
bpf_tail_call and another struct_ops program that was used in the prog_array. 
These two struct_ops programs need to be used in two separate struct_ops maps to 
be able to load. The way that test_loader attaching all maps in your patch will 
fail because bpf_testmod does not support attaching more than one struct_ops map.

I don't want to further polish on the tail_call testing side. I will stay with 
the current way to do the tail_call test which also allows the more regular 
trampoline "unsigned long long *ctx" for the main struct_ops prog and also 
allows using ctx_in in the SEC("syscall") prog.

Thanks.
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index 3687a40b61c6..7194330bdefc 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -17,6 +17,7 @@ 
 #include <linux/in.h>
 #include <linux/in6.h>
 #include <linux/un.h>
+#include <linux/filter.h>
 #include <net/sock.h>
 #include <linux/namei.h>
 #include "bpf_testmod.h"
@@ -920,6 +921,51 @@  __bpf_kfunc int bpf_kfunc_call_kernel_getpeername(struct addr_args *args)
 	return err;
 }
 
+static DEFINE_MUTEX(st_ops_mutex);
+static struct bpf_testmod_st_ops *st_ops;
+
+__bpf_kfunc int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args)
+{
+	int ret = -1;
+
+	mutex_lock(&st_ops_mutex);
+	if (st_ops && st_ops->test_prologue)
+		ret = st_ops->test_prologue(args);
+	mutex_unlock(&st_ops_mutex);
+
+	return ret;
+}
+
+__bpf_kfunc int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args)
+{
+	int ret = -1;
+
+	mutex_lock(&st_ops_mutex);
+	if (st_ops && st_ops->test_epilogue)
+		ret = st_ops->test_epilogue(args);
+	mutex_unlock(&st_ops_mutex);
+
+	return ret;
+}
+
+__bpf_kfunc int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args)
+{
+	int ret = -1;
+
+	mutex_lock(&st_ops_mutex);
+	if (st_ops && st_ops->test_pro_epilogue)
+		ret = st_ops->test_pro_epilogue(args);
+	mutex_unlock(&st_ops_mutex);
+
+	return ret;
+}
+
+__bpf_kfunc int bpf_kfunc_st_ops_inc10(struct st_ops_args *args)
+{
+	args->a += 10;
+	return args->a;
+}
+
 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -956,6 +1002,10 @@  BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_sendmsg, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_kfunc_call_sock_sendmsg, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getsockname, KF_SLEEPABLE)
 BTF_ID_FLAGS(func, bpf_kfunc_call_kernel_getpeername, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_prologue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_test_pro_epilogue, KF_TRUSTED_ARGS | KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_kfunc_st_ops_inc10, KF_TRUSTED_ARGS)
 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
 
 static int bpf_testmod_ops_init(struct btf *btf)
@@ -1075,6 +1125,144 @@  struct bpf_struct_ops bpf_testmod_ops2 = {
 	.owner = THIS_MODULE,
 };
 
+static int bpf_test_mod_st_ops__test_prologue(struct st_ops_args *args)
+{
+	return 0;
+}
+
+static int bpf_test_mod_st_ops__test_epilogue(struct st_ops_args *args)
+{
+	return 0;
+}
+
+static int bpf_test_mod_st_ops__test_pro_epilogue(struct st_ops_args *args)
+{
+	return 0;
+}
+
+static int st_ops_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
+			       const struct bpf_prog *prog)
+{
+	struct bpf_insn *insn = insn_buf;
+
+	if (strcmp(prog->aux->attach_func_name, "test_prologue") &&
+	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
+		return 0;
+
+	/* r6 = r1[0]; // r6 will be "struct st_ops_args *args". r1 is "u64 *ctx".
+	 * r7 = r6->a;
+	 * r7 += 1000;
+	 * r6->a = r7;
+	 */
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0);
+	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_ALU32_IMM(BPF_ADD, BPF_REG_7, 1000);
+	*insn++ = BPF_STX_MEM(BPF_W, BPF_REG_6, BPF_REG_7, offsetof(struct st_ops_args, a));
+	*insn++ = prog->insnsi[0];
+
+	return insn - insn_buf;
+}
+
+static int st_ops_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
+			       s16 ctx_stack_off)
+{
+	struct bpf_insn *insn = insn_buf;
+
+	if (strcmp(prog->aux->attach_func_name, "test_epilogue") &&
+	    strcmp(prog->aux->attach_func_name, "test_pro_epilogue"))
+		return 0;
+
+	/* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
+	 * r1 = r1[0]; // r1 will be "struct st_ops_args *args"
+	 * r6 = r1->a;
+	 * r6 += 10000;
+	 * r1->a = r6;
+	 * r0 = r6;
+	 * r0 *= 2;
+	 * BPF_EXIT;
+	 */
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
+	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_ALU32_IMM(BPF_ADD, BPF_REG_6, 10000);
+	*insn++ = BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, offsetof(struct st_ops_args, a));
+	*insn++ = BPF_MOV32_REG(BPF_REG_0, BPF_REG_6);
+	*insn++ = BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 2);
+	*insn++ = BPF_EXIT_INSN();
+
+	return insn - insn_buf;
+}
+
+static int st_ops_btf_struct_access(struct bpf_verifier_log *log,
+				    const struct bpf_reg_state *reg,
+				    int off, int size)
+{
+	if (off < 0 || off + size > sizeof(struct st_ops_args))
+		return -EACCES;
+	return 0;
+}
+
+static const struct bpf_verifier_ops st_ops_verifier_ops = {
+	.is_valid_access = bpf_testmod_ops_is_valid_access,
+	.btf_struct_access = st_ops_btf_struct_access,
+	.gen_prologue = st_ops_gen_prologue,
+	.gen_epilogue = st_ops_gen_epilogue,
+	.get_func_proto = bpf_base_func_proto,
+};
+
+static struct bpf_testmod_st_ops st_ops_cfi_stubs = {
+	.test_prologue = bpf_test_mod_st_ops__test_prologue,
+	.test_epilogue = bpf_test_mod_st_ops__test_epilogue,
+	.test_pro_epilogue = bpf_test_mod_st_ops__test_pro_epilogue,
+};
+
+static int st_ops_reg(void *kdata, struct bpf_link *link)
+{
+	int err = 0;
+
+	mutex_lock(&st_ops_mutex);
+	if (st_ops) {
+		pr_err("st_ops has already been registered\n");
+		err = -EEXIST;
+		goto unlock;
+	}
+	st_ops = kdata;
+
+unlock:
+	mutex_unlock(&st_ops_mutex);
+	return err;
+}
+
+static void st_ops_unreg(void *kdata, struct bpf_link *link)
+{
+	mutex_lock(&st_ops_mutex);
+	st_ops = NULL;
+	mutex_unlock(&st_ops_mutex);
+}
+
+static int st_ops_init(struct btf *btf)
+{
+	return 0;
+}
+
+static int st_ops_init_member(const struct btf_type *t,
+			      const struct btf_member *member,
+			      void *kdata, const void *udata)
+{
+	return 0;
+}
+
+static struct bpf_struct_ops testmod_st_ops = {
+	.verifier_ops = &st_ops_verifier_ops,
+	.init = st_ops_init,
+	.init_member = st_ops_init_member,
+	.reg = st_ops_reg,
+	.unreg = st_ops_unreg,
+	.cfi_stubs = &st_ops_cfi_stubs,
+	.name = "bpf_testmod_st_ops",
+	.owner = THIS_MODULE,
+};
+
 extern int bpf_fentry_test1(int a);
 
 static int bpf_testmod_init(void)
@@ -1092,8 +1280,10 @@  static int bpf_testmod_init(void)
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set);
 	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set);
+	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_testmod_kfunc_set);
 	ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops);
 	ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2);
+	ret = ret ?: register_bpf_struct_ops(&testmod_st_ops, bpf_testmod_st_ops);
 	ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors,
 						 ARRAY_SIZE(bpf_testmod_dtors),
 						 THIS_MODULE);
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
index fe0d402b0d65..3241a9d796ed 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.h
@@ -94,4 +94,15 @@  struct bpf_testmod_ops2 {
 	int (*test_1)(void);
 };
 
+struct st_ops_args { /* context handed to every bpf_testmod_st_ops member */
+	int a; /* accumulator read/updated by prologue, prog body and epilogue */
+};
+
+struct bpf_testmod_st_ops { /* one member per gen_prologue/gen_epilogue combination under test */
+	int (*test_prologue)(struct st_ops_args *args),
+	int (*test_epilogue)(struct st_ops_args *args);
+	int (*test_pro_epilogue)(struct st_ops_args *args);
+	struct module *owner;
+};
+
 #endif /* _BPF_TESTMOD_H */
diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
index e587a79f2239..0df429a0edaa 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h
@@ -144,4 +144,10 @@  void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nulla
 struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym;
 void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym;
 
+struct st_ops_args;
+int bpf_kfunc_st_ops_test_prologue(struct st_ops_args *args) __ksym;
+int bpf_kfunc_st_ops_test_epilogue(struct st_ops_args *args) __ksym;
+int bpf_kfunc_st_ops_test_pro_epilogue(struct st_ops_args *args) __ksym;
+int bpf_kfunc_st_ops_inc10(struct st_ops_args *args) __ksym;
+
 #endif /* _BPF_TESTMOD_KFUNC_H */
diff --git a/tools/testing/selftests/bpf/prog_tests/struct_ops_syscall.c b/tools/testing/selftests/bpf/prog_tests/struct_ops_syscall.c
new file mode 100644
index 000000000000..a293a35b0dcc
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/struct_ops_syscall.c
@@ -0,0 +1,91 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include "struct_ops_syscall.skel.h"
+
+#define EPILOGUE_A  10000
+#define PROLOGUE_A   1000
+#define KFUNC_A10      10
+#define SUBPROG_A       1
+
+#define SUBPROG_TEST_MAIN	SUBPROG_A
+#define KFUNC_TEST_MAIN		(KFUNC_A10 + SUBPROG_A)
+
+struct st_ops_args { /* local mirror of the testmod layout; used as test_run ctx_in */
+	int a;
+};
+
+static void do_test(struct struct_ops_syscall *skel,
+		    struct bpf_map *st_ops_map, int main_prog_a) /* attach one st_ops map, run all three variants, check args.a/retval */
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	int err, prog_fd, expected_a;
+	struct st_ops_args args;
+	struct bpf_link *link;
+
+	topts.ctx_in = &args; /* test_run ctx is forwarded to the st_ops prog as args */
+	topts.ctx_size_in = sizeof(args);
+
+	link = bpf_map__attach_struct_ops(st_ops_map);
+	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+		return;
+
+	/* gen_prologue + main prog */
+	expected_a = PROLOGUE_A + main_prog_a;
+	memset(&args, 0, sizeof(args));
+	prog_fd = bpf_program__fd(skel->progs.syscall_prologue);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts");
+	ASSERT_EQ(args.a, expected_a, "args.a");
+	ASSERT_EQ(topts.retval, 0, "topts.retval"); /* no epilogue: prog's own 0 is returned */
+
+	/* main prog + gen_epilogue */
+	expected_a =  main_prog_a + EPILOGUE_A;
+	memset(&args, 0, sizeof(args));
+	prog_fd = bpf_program__fd(skel->progs.syscall_epilogue);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts");
+	ASSERT_EQ(args.a, expected_a, "args.a");
+	ASSERT_EQ(topts.retval, expected_a * 2, "topts.retval"); /* gen_epilogue returns 2 * args->a */
+
+	/* gen_prologue + main prog + gen_epilogue */
+	expected_a = PROLOGUE_A + main_prog_a + EPILOGUE_A;
+	memset(&args, 0, sizeof(args));
+	prog_fd = bpf_program__fd(skel->progs.syscall_pro_epilogue);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "bpf_prog_test_run_opts");
+	ASSERT_EQ(args.a, expected_a, "args.a");
+	ASSERT_EQ(topts.retval, expected_a * 2, "topts.retval");
+	bpf_link__destroy(link);
+}
+
+void test_struct_ops_syscall(void) /* entry point: one subtest per struct_ops implementation style */
+{
+	struct struct_ops_syscall *skel;
+
+	skel = struct_ops_syscall__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+		return;
+
+	if (test__start_subtest("subprog")) /* progs built from plain subprog calls */
+		do_test(skel, skel->maps.pro_epilogue_subprog_ops,
+			SUBPROG_TEST_MAIN);
+
+	if (test__start_subtest("kfunc")) /* progs additionally calling a kfunc */
+		do_test(skel, skel->maps.pro_epilogue_kfunc_ops,
+			KFUNC_TEST_MAIN);
+
+	if (test__start_subtest("tailcall")) {
+		const int zero = 0;
+		int prog_fd = bpf_program__fd(skel->progs.test_epilogue_subprog);
+		int map_fd = bpf_map__fd(skel->maps.epilogue_map);
+		int err;
+
+		err = bpf_map_update_elem(map_fd, &zero, &prog_fd, 0); /* slot 0 -> test_epilogue_subprog, so results match the subprog case */
+		if (ASSERT_OK(err, "map_update(epilogue_map)"))
+			do_test(skel, skel->maps.pro_epilogue_tail_ops,
+				SUBPROG_TEST_MAIN);
+	}
+
+	struct_ops_syscall__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_syscall.c b/tools/testing/selftests/bpf/progs/struct_ops_syscall.c
new file mode 100644
index 000000000000..ee153461d9f8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/struct_ops_syscall.c
@@ -0,0 +1,113 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} epilogue_map SEC(".maps"); /* slot 0 is filled by the tailcall subtest in prog_tests */
+
+static __noinline int subprog(struct st_ops_args *args) /* __noinline keeps this a real BPF subprog call */
+{
+	args->a += 1; /* the SUBPROG_A contribution checked by prog_tests */
+	return 0;
+}
+
+SEC("struct_ops/test_prologue_subprog")
+int BPF_PROG(test_prologue_subprog, struct st_ops_args *args) /* body adds 1 via subprog(); verifier prepends gen_prologue */
+{
+	subprog(args);
+	return 0;
+}
+
+SEC("struct_ops/test_epilogue_subprog")
+int BPF_PROG(test_epilogue_subprog, struct st_ops_args *args) /* body adds 1 via subprog(); verifier appends gen_epilogue */
+{
+	subprog(args);
+	return 0;
+}
+
+SEC("struct_ops/test_pro_epilogue_subprog")
+int BPF_PROG(test_pro_epilogue_subprog, struct st_ops_args *args) /* exercises gen_prologue and gen_epilogue together */
+{
+	subprog(args);
+	return 0;
+}
+
+SEC("struct_ops/test_prologue_kfunc")
+int BPF_PROG(test_prologue_kfunc, struct st_ops_args *args) /* kfunc variant: also calls a testmod kfunc from a struct_ops prog */
+{
+	bpf_kfunc_st_ops_inc10(args); /* the KFUNC_A10 contribution */
+	subprog(args);
+	return 0;
+}
+
+SEC("struct_ops/test_epilogue_kfunc")
+int BPF_PROG(test_epilogue_kfunc, struct st_ops_args *args) /* kfunc variant of the epilogue test */
+{
+	bpf_kfunc_st_ops_inc10(args); /* the KFUNC_A10 contribution */
+	subprog(args);
+	return 0;
+}
+
+SEC("struct_ops/test_pro_epilogue_kfunc")
+int BPF_PROG(test_pro_epilogue_kfunc, struct st_ops_args *args) /* kfunc variant of the combined prologue+epilogue test */
+{
+	bpf_kfunc_st_ops_inc10(args); /* the KFUNC_A10 contribution */
+	subprog(args);
+	return 0;
+}
+
+SEC("struct_ops/test_epilogue_tail")
+int test_epilogue_tail(unsigned long long *ctx) /* raw ctx (no BPF_PROG): tail-calls into epilogue_map slot 0 */
+{
+	bpf_tail_call_static(ctx, &epilogue_map, 0);
+	return 0; /* reached only if the tail call fails */
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue_subprog_ops = { /* all members implemented with plain subprog calls */
+	.test_prologue = (void *)test_prologue_subprog,
+	.test_epilogue = (void *)test_epilogue_subprog,
+	.test_pro_epilogue = (void *)test_pro_epilogue_subprog,
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue_kfunc_ops = { /* all members additionally call a kfunc */
+	.test_prologue = (void *)test_prologue_kfunc,
+	.test_epilogue = (void *)test_epilogue_kfunc,
+	.test_pro_epilogue = (void *)test_pro_epilogue_kfunc,
+};
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops pro_epilogue_tail_ops = { /* epilogue member reaches the real prog via tail call */
+	.test_prologue = (void *)test_prologue_subprog,
+	.test_epilogue = (void *)test_epilogue_tail,
+	.test_pro_epilogue = (void *)test_pro_epilogue_subprog,
+};
+
+SEC("syscall")
+int syscall_prologue(struct st_ops_args *args) /* test_run trampoline: invoke st_ops->test_prologue via kfunc */
+{
+	return bpf_kfunc_st_ops_test_prologue(args);
+}
+
+SEC("syscall")
+int syscall_epilogue(struct st_ops_args *args) /* test_run trampoline: invoke st_ops->test_epilogue via kfunc */
+{
+	return bpf_kfunc_st_ops_test_epilogue(args);
+}
+
+SEC("syscall")
+int syscall_pro_epilogue(struct st_ops_args *args) /* test_run trampoline: invoke st_ops->test_pro_epilogue via kfunc */
+{
+	return bpf_kfunc_st_ops_test_pro_epilogue(args);
+}