[v5,bpf-next,2/3] mm/bpf: Add bpf_get_kmem_cache() kfunc

Message ID 20241010232505.1339892-3-namhyung@kernel.org
State Accepted
Commit 04b069ff01811551638fda0cd80dc5209c716eb1
Delegated to: BPF
Series bpf: Add kmem_cache iterator and kfunc

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 6 this patch: 6
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 22 of 22 maintainers
netdev/build_clang success Errors and warnings before: 6 this patch: 6
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 70 this patch: 70
netdev/checkpatch warning WARNING: line length of 86 exceeds 80 columns
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 10 this patch: 10
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-17 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-18 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-21 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18

Commit Message

Namhyung Kim Oct. 10, 2024, 11:25 p.m. UTC
The bpf_get_kmem_cache() kfunc looks up slab cache information from
a virtual address, like virt_to_cache().  If the address is a pointer
to a slab object, it returns a valid kmem_cache pointer; otherwise it
returns NULL.

It doesn't take a reference on the kmem_cache, so the caller is
responsible for managing access.  The returned pointer is marked as
PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
slab object might be protected by RCU.

The intended use case for now is to symbolize locks in slab objects
from the lock contention tracepoints.

Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev> (mm/*)
Acked-by: Vlastimil Babka <vbabka@suse.cz> #mm/slab
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 kernel/bpf/helpers.c  |  1 +
 kernel/bpf/verifier.c |  5 +++++
 mm/slab_common.c      | 19 +++++++++++++++++++
 3 files changed, 25 insertions(+)
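
To make the intended use concrete, here is a minimal sketch (not part
of the series) of a BPF program that symbolizes a contended lock via
the new kfunc.  The program name is illustrative; the arguments follow
the contention_begin tracepoint's TP_PROTO.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;

SEC("tp_btf/contention_begin")
int BPF_PROG(probe_contention, void *lock, unsigned int flags)
{
	struct kmem_cache *s;
	char name[32] = {};

	/* returns NULL unless 'lock' points into a slab object */
	s = bpf_get_kmem_cache((u64)lock);
	if (s)
		/* 's' is PTR_UNTRUSTED: the s->name dereference is
		 * rewritten by the verifier into a guarded probe_read */
		bpf_probe_read_kernel_str(name, sizeof(name), s->name);

	bpf_printk("lock %lx in cache %s", (unsigned long)lock, name);
	return 0;
}

char _license[] SEC("license") = "GPL";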

Comments

Alexei Starovoitov Oct. 11, 2024, 6:35 p.m. UTC | #1
On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> The bpf_get_kmem_cache() kfunc looks up slab cache information from
> a virtual address, like virt_to_cache().  If the address is a pointer
> to a slab object, it returns a valid kmem_cache pointer; otherwise it
> returns NULL.
>
> It doesn't take a reference on the kmem_cache, so the caller is
> responsible for managing access.  The returned pointer is marked as
> PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> slab object might be protected by RCU.

...
> +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)

This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
In this case it likely points to a valid kmem_cache, but
the verifier will guard all accesses with probe_read anyway.

I can remove this flag while applying.
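
As an illustration of that point: once the flag is dropped, no explicit
RCU section is needed around the call, since every access through the
untrusted result is emitted as a guarded load anyway.  A small sketch
under that assumption (program name illustrative):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;

SEC("tp_btf/contention_begin")
int BPF_PROG(untrusted_deref, void *lock, unsigned int flags)
{
	/* no bpf_rcu_read_lock()/unlock() pair required: the result is
	 * PTR_UNTRUSTED, so the field load below is converted into a
	 * probe_read-style access that tolerates a bogus pointer */
	struct kmem_cache *s = bpf_get_kmem_cache((u64)lock);

	if (s)
		bpf_printk("object_size=%u", s->object_size);
	return 0;
}

char _license[] SEC("license") = "GPL";
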
Namhyung Kim Oct. 11, 2024, 7:14 p.m. UTC | #2
On Fri, Oct 11, 2024 at 11:35:27AM -0700, Alexei Starovoitov wrote:
> On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > The bpf_get_kmem_cache() kfunc looks up slab cache information from
> > a virtual address, like virt_to_cache().  If the address is a pointer
> > to a slab object, it returns a valid kmem_cache pointer; otherwise it
> > returns NULL.
> >
> > It doesn't take a reference on the kmem_cache, so the caller is
> > responsible for managing access.  The returned pointer is marked as
> > PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> > slab object might be protected by RCU.
> 
> ...
> > +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
> 
> This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
> In this case it likely points to a valid kmem_cache, but
> the verifier will guard all accesses with probe_read anyway.
> 
> I can remove this flag while applying.

Ok, I'd be happy if you would remove it.

Thanks,
Namhyung
Namhyung Kim Oct. 14, 2024, 6:13 p.m. UTC | #3
Hi Alexei,

On Fri, Oct 11, 2024 at 12:14:14PM -0700, Namhyung Kim wrote:
> On Fri, Oct 11, 2024 at 11:35:27AM -0700, Alexei Starovoitov wrote:
> > On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > >
> > > The bpf_get_kmem_cache() kfunc looks up slab cache information from
> > > a virtual address, like virt_to_cache().  If the address is a pointer
> > > to a slab object, it returns a valid kmem_cache pointer; otherwise it
> > > returns NULL.
> > >
> > > It doesn't take a reference on the kmem_cache, so the caller is
> > > responsible for managing access.  The returned pointer is marked as
> > > PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> > > slab object might be protected by RCU.
> > 
> > ...
> > > +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
> > 
> > This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
> > In this case it likely points to a valid kmem_cache, but
> > the verifier will guard all accesses with probe_read anyway.
> > 
> > I can remove this flag while applying.
> 
> Ok, I'd be happy if you would remove it.

You will need to update the bpf_rcu_read_lock/unlock() in the test code
(patch 3).  I can send v6 with that and Vlastimil's Ack if you want.

Thanks,
Namhyung
Alexei Starovoitov Oct. 15, 2024, 1:50 a.m. UTC | #4
On Mon, Oct 14, 2024 at 11:13 AM Namhyung Kim <namhyung@kernel.org> wrote:
>
> Hi Alexei,
>
> On Fri, Oct 11, 2024 at 12:14:14PM -0700, Namhyung Kim wrote:
> > On Fri, Oct 11, 2024 at 11:35:27AM -0700, Alexei Starovoitov wrote:
> > > On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > > >
> > > > The bpf_get_kmem_cache() kfunc looks up slab cache information from
> > > > a virtual address, like virt_to_cache().  If the address is a pointer
> > > > to a slab object, it returns a valid kmem_cache pointer; otherwise it
> > > > returns NULL.
> > > >
> > > > It doesn't take a reference on the kmem_cache, so the caller is
> > > > responsible for managing access.  The returned pointer is marked as
> > > > PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> > > > slab object might be protected by RCU.
> > >
> > > ...
> > > > +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
> > >
> > > This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
> > > In this case it likely points to a valid kmem_cache, but
> > > the verifier will guard all accesses with probe_read anyway.
> > >
> > > I can remove this flag while applying.
> >
> > Ok, I'd be happy if you would remove it.
>
> You will need to update the bpf_rcu_read_lock/unlock() in the test code
> (patch 3).  I can send v6 with that and Vlastimil's Ack if you want.

Fixed all that while applying.

Could you please follow up with an open-coded iterator version
of the same slab iterator, so that progs can iterate slabs in a
normal for/while loop?
Namhyung Kim Oct. 15, 2024, 6:19 p.m. UTC | #5
On Mon, Oct 14, 2024 at 06:50:49PM -0700, Alexei Starovoitov wrote:
> On Mon, Oct 14, 2024 at 11:13 AM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > Hi Alexei,
> >
> > On Fri, Oct 11, 2024 at 12:14:14PM -0700, Namhyung Kim wrote:
> > > On Fri, Oct 11, 2024 at 11:35:27AM -0700, Alexei Starovoitov wrote:
> > > > On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > > > >
> > > > > The bpf_get_kmem_cache() kfunc looks up slab cache information from
> > > > > a virtual address, like virt_to_cache().  If the address is a pointer
> > > > > to a slab object, it returns a valid kmem_cache pointer; otherwise it
> > > > > returns NULL.
> > > > >
> > > > > It doesn't take a reference on the kmem_cache, so the caller is
> > > > > responsible for managing access.  The returned pointer is marked as
> > > > > PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> > > > > slab object might be protected by RCU.
> > > >
> > > > ...
> > > > > +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
> > > >
> > > > This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
> > > > In this case it likely points to a valid kmem_cache, but
> > > > the verifier will guard all accesses with probe_read anyway.
> > > >
> > > > I can remove this flag while applying.
> > >
> > > Ok, I'd be happy if you would remove it.
> >
> > You will need to update the bpf_rcu_read_lock/unlock() in the test code
> > (patch 3).  I can send v6 with that and Vlastimil's Ack if you want.
> 
> Fixed all that while applying.
> 
> Could you please follow up with an open-coded iterator version
> of the same slab iterator, so that progs can iterate slabs in a
> normal for/while loop?

I'm not sure I'm following.  Do you want a new test program to iterate
kmem_caches by reading list pointers manually?  How can I grab the
slab_mutex then?

Thanks,
Namhyung
Alexei Starovoitov Oct. 15, 2024, 6:25 p.m. UTC | #6
On Tue, Oct 15, 2024 at 11:20 AM Namhyung Kim <namhyung@kernel.org> wrote:
>
> On Mon, Oct 14, 2024 at 06:50:49PM -0700, Alexei Starovoitov wrote:
> > On Mon, Oct 14, 2024 at 11:13 AM Namhyung Kim <namhyung@kernel.org> wrote:
> > >
> > > Hi Alexei,
> > >
> > > On Fri, Oct 11, 2024 at 12:14:14PM -0700, Namhyung Kim wrote:
> > > > On Fri, Oct 11, 2024 at 11:35:27AM -0700, Alexei Starovoitov wrote:
> > > > > On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > > > > >
> > > > > > The bpf_get_kmem_cache() kfunc looks up slab cache information from
> > > > > > a virtual address, like virt_to_cache().  If the address is a pointer
> > > > > > to a slab object, it returns a valid kmem_cache pointer; otherwise it
> > > > > > returns NULL.
> > > > > >
> > > > > > It doesn't take a reference on the kmem_cache, so the caller is
> > > > > > responsible for managing access.  The returned pointer is marked as
> > > > > > PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> > > > > > slab object might be protected by RCU.
> > > > >
> > > > > ...
> > > > > > +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
> > > > >
> > > > > This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
> > > > > In this case it likely points to a valid kmem_cache, but
> > > > > the verifier will guard all accesses with probe_read anyway.
> > > > >
> > > > > I can remove this flag while applying.
> > > >
> > > > Ok, I'd be happy if you would remove it.
> > >
> > > You will need to update the bpf_rcu_read_lock/unlock() in the test code
> > > (patch 3).  I can send v6 with that and Vlastimil's Ack if you want.
> >
> > Fixed all that while applying.
> >
> > Could you please follow up with an open-coded iterator version
> > of the same slab iterator, so that progs can iterate slabs in a
> > normal for/while loop?
>
> I'm not sure I'm following.  Do you want a new test program to iterate
> kmem_caches by reading list pointers manually?  How can I grab the
> slab_mutex then?

No.
See bpf_iter_task_new/_next/_destroy kfuncs and
commit c68a78ffe2cb ("bpf: Introduce task open coded iterator kfuncs").
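
For reference, a hypothetical sketch of what such an open-coded slab
iterator could look like from a BPF program, modeled on the task
iterator in commit c68a78ffe2cb.  The bpf_iter_kmem_cache_* kfuncs and
the struct layout below do not exist in this series and are purely
illustrative.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* opaque iterator state; the size is a placeholder */
struct bpf_iter_kmem_cache {
	__u64 __opaque[1];
} __attribute__((aligned(8)));

extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __ksym;
extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __ksym;
extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __ksym;

SEC("syscall")
int slab_iter_demo(void *ctx)
{
	struct bpf_iter_kmem_cache it;
	struct kmem_cache *s;

	/* the new/next/destroy triplet lets the prog drive iteration
	 * as a plain while loop instead of a bpf_iter program */
	bpf_iter_kmem_cache_new(&it);
	while ((s = bpf_iter_kmem_cache_next(&it)))
		bpf_printk("cache object_size=%u", s->object_size);
	bpf_iter_kmem_cache_destroy(&it);
	return 0;
}

char _license[] SEC("license") = "GPL";
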
Namhyung Kim Oct. 15, 2024, 8:54 p.m. UTC | #7
On Tue, Oct 15, 2024 at 11:25:11AM -0700, Alexei Starovoitov wrote:
> On Tue, Oct 15, 2024 at 11:20 AM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > On Mon, Oct 14, 2024 at 06:50:49PM -0700, Alexei Starovoitov wrote:
> > > On Mon, Oct 14, 2024 at 11:13 AM Namhyung Kim <namhyung@kernel.org> wrote:
> > > >
> > > > Hi Alexei,
> > > >
> > > > On Fri, Oct 11, 2024 at 12:14:14PM -0700, Namhyung Kim wrote:
> > > > > On Fri, Oct 11, 2024 at 11:35:27AM -0700, Alexei Starovoitov wrote:
> > > > > > On Thu, Oct 10, 2024 at 4:25 PM Namhyung Kim <namhyung@kernel.org> wrote:
> > > > > > >
> > > > > > > The bpf_get_kmem_cache() kfunc looks up slab cache information from
> > > > > > > a virtual address, like virt_to_cache().  If the address is a pointer
> > > > > > > to a slab object, it returns a valid kmem_cache pointer; otherwise it
> > > > > > > returns NULL.
> > > > > > >
> > > > > > > It doesn't take a reference on the kmem_cache, so the caller is
> > > > > > > responsible for managing access.  The returned pointer is marked as
> > > > > > > PTR_UNTRUSTED.  The kfunc is also marked KF_RCU_PROTECTED since the
> > > > > > > slab object might be protected by RCU.
> > > > > >
> > > > > > ...
> > > > > > > +BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
> > > > > >
> > > > > > This flag is unnecessary. PTR_UNTRUSTED can point to absolutely any memory.
> > > > > > In this case it likely points to a valid kmem_cache, but
> > > > > > the verifier will guard all accesses with probe_read anyway.
> > > > > >
> > > > > > I can remove this flag while applying.
> > > > >
> > > > > Ok, I'd be happy if you would remove it.
> > > >
> > > > You will need to update the bpf_rcu_read_lock/unlock() in the test code
> > > > (patch 3).  I can send v6 with that and Vlastimil's Ack if you want.
> > >
> > > Fixed all that while applying.
> > >
> > > Could you please follow up with an open-coded iterator version
> > > of the same slab iterator, so that progs can iterate slabs in a
> > > normal for/while loop?
> >
> > I'm not sure I'm following.  Do you want a new test program to iterate
> > kmem_caches by reading list pointers manually?  How can I grab the
> > slab_mutex then?
> 
> No.
> See bpf_iter_task_new/_next/_destroy kfuncs and
> commit c68a78ffe2cb ("bpf: Introduce task open coded iterator kfuncs").

Oh, ok.  Thanks for the pointer, I'll take a look and add the open-coded
version.

Thanks,
Namhyung

Patch

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 4053f279ed4cc7ab..7bfef9378ab21267 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -3090,6 +3090,7 @@ BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW)
 BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_copy_from_user_str, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_get_kmem_cache, KF_RCU_PROTECTED)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index cfc62e0776bff2c8..f514247ba8ba8a57 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -11259,6 +11259,7 @@ enum special_kfunc_type {
 	KF_bpf_preempt_enable,
 	KF_bpf_iter_css_task_new,
 	KF_bpf_session_cookie,
+	KF_bpf_get_kmem_cache,
 };
 
 BTF_SET_START(special_kfunc_set)
@@ -11324,6 +11325,7 @@ BTF_ID(func, bpf_session_cookie)
 #else
 BTF_ID_UNUSED
 #endif
+BTF_ID(func, bpf_get_kmem_cache)
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -12834,6 +12836,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			regs[BPF_REG_0].type = PTR_TO_BTF_ID;
 			regs[BPF_REG_0].btf_id = ptr_type_id;
 
+			if (meta.func_id == special_kfunc_list[KF_bpf_get_kmem_cache])
+				regs[BPF_REG_0].type |= PTR_UNTRUSTED;
+
 			if (is_iter_next_kfunc(&meta)) {
 				struct bpf_reg_state *cur_iter;
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7443244656150325..5484e1cd812f698e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1322,6 +1322,25 @@ size_t ksize(const void *objp)
 }
 EXPORT_SYMBOL(ksize);
 
+#ifdef CONFIG_BPF_SYSCALL
+#include <linux/btf.h>
+
+__bpf_kfunc_start_defs();
+
+__bpf_kfunc struct kmem_cache *bpf_get_kmem_cache(u64 addr)
+{
+	struct slab *slab;
+
+	if (!virt_addr_valid(addr))
+		return NULL;
+
+	slab = virt_to_slab((void *)(long)addr);
+	return slab ? slab->slab_cache : NULL;
+}
+
+__bpf_kfunc_end_defs();
+#endif /* CONFIG_BPF_SYSCALL */
+
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);