diff mbox series

[bpf-next,2/4] selftests/bpf: Add cgroup kfunc / kptr selftests

Message ID 20221122055458.173143-3-void@manifault.com (mailing list archive)
State Accepted
Commit f583ddf15e57746e60f3b68d529afc9faa2e2cb3
Delegated to: BPF
Headers show
Series Support storing struct cgroup * objects as kptrs | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix success Link
netdev/cover_letter success Series has a cover letter
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/cc_maintainers warning 3 maintainers not CCed: linux-kselftest@vger.kernel.org shuah@kernel.org mykolal@fb.com
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning CHECK: Please don't use multiple blank lines WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 100 exceeds 80 columns WARNING: line length of 81 exceeds 80 columns WARNING: line length of 82 exceeds 80 columns WARNING: line length of 83 exceeds 80 columns WARNING: line length of 84 exceeds 80 columns WARNING: line length of 85 exceeds 80 columns WARNING: line length of 86 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns WARNING: line length of 91 exceeds 80 columns WARNING: line length of 95 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 97 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-7 success Logs for llvm-toolchain
bpf/vmtest-bpf-next-VM_Test-8 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-2 success Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-3 success Logs for build for aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-5 success Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-4 success Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 success Logs for test_maps on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-17 fail Logs for test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-18 fail Logs for test_progs on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-19 fail Logs for test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for test_progs_no_alu32 on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-24 success Logs for test_progs_no_alu32_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-29 success Logs for test_progs_parallel on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-32 success Logs for test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for test_progs_parallel on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-34 success Logs for test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-38 success Logs for test_verifier on x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-9 success Logs for test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-10 success Logs for test_maps on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for test_progs on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-20 fail Logs for test_progs_no_alu32 on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-25 success Logs for test_progs_no_alu32_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-30 success Logs for test_progs_parallel on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-35 success Logs for test_verifier on aarch64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-36 success Logs for test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for test_progs_no_alu32_parallel on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 fail Logs for test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for test_progs_parallel on s390x with gcc
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-11 success Logs for test_maps on s390x with gcc

Commit Message

David Vernet Nov. 22, 2022, 5:54 a.m. UTC
This patch adds a selftest suite to validate the cgroup kfuncs that were
added in the prior patch.

Signed-off-by: David Vernet <void@manifault.com>
---
 tools/testing/selftests/bpf/DENYLIST.s390x    |   1 +
 .../selftests/bpf/prog_tests/cgrp_kfunc.c     | 174 ++++++++++++
 .../selftests/bpf/progs/cgrp_kfunc_common.h   |  71 +++++
 .../selftests/bpf/progs/cgrp_kfunc_failure.c  | 260 ++++++++++++++++++
 .../selftests/bpf/progs/cgrp_kfunc_success.c  | 125 +++++++++
 5 files changed, 631 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
 create mode 100644 tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
 create mode 100644 tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
 create mode 100644 tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index 12cf2159975e..b9a3d80204c6 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -10,6 +10,7 @@  bpf_nf                                   # JIT does not support calling kernel f
 bpf_tcp_ca                               # JIT does not support calling kernel function                                (kfunc)
 cb_refs                                  # expected error message unexpected error: -524                               (trampoline)
 cgroup_hierarchical_stats                # JIT does not support calling kernel function                                (kfunc)
+cgrp_kfunc                               # JIT does not support calling kernel function
 cgrp_local_storage                       # prog_attach unexpected error: -524                                          (trampoline)
 core_read_macros                         # unknown func bpf_probe_read#4                                               (overlapping)
 d_path                                   # failed to auto-attach program 'prog_stat': -524                             (trampoline)
diff --git a/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
new file mode 100644
index 000000000000..a59b166bbcc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/cgrp_kfunc.c
@@ -0,0 +1,174 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#define _GNU_SOURCE
+#include <cgroup_helpers.h>
+#include <test_progs.h>
+
+#include "cgrp_kfunc_failure.skel.h"
+#include "cgrp_kfunc_success.skel.h"
+
+static size_t log_buf_sz = 1 << 20; /* 1 MB */
+static char obj_log_buf[1048576];
+
+/* Open the success skeleton, record our PID so the progs can filter on it,
+ * then load it. Returns the loaded skeleton, or NULL on failure (the
+ * skeleton is destroyed on the load-error path).
+ */
+static struct cgrp_kfunc_success *open_load_cgrp_kfunc_skel(void)
+{
+	struct cgrp_kfunc_success *skel;
+	int err;
+
+	skel = cgrp_kfunc_success__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return NULL;
+
+	/* Progs use this to ignore tracepoint hits from other processes. */
+	skel->bss->pid = getpid();
+
+	err = cgrp_kfunc_success__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	return skel;
+
+cleanup:
+	cgrp_kfunc_success__destroy(skel);
+	return NULL;
+}
+
+/* Create and immediately remove a "cgrp_kfunc" cgroup, firing the
+ * cgroup_mkdir tracepoint the test progs attach to. Returns 0 on success,
+ * -1 if the cgroup could not be created.
+ */
+static int mkdir_rm_test_dir(void)
+{
+	int fd;
+	const char *cgrp_path = "cgrp_kfunc";
+
+	fd = create_and_get_cgroup(cgrp_path);
+	if (!ASSERT_GT(fd, 0, "mkdir_cgrp_fd"))
+		return -1;
+
+	close(fd);
+	remove_cgroup(cgrp_path);
+
+	return 0;
+}
+
+/* Attach the named success prog, trigger it via a cgroup mkdir/rmdir cycle,
+ * and verify it ran exactly once without reporting an error.
+ */
+static void run_success_test(const char *prog_name)
+{
+	struct cgrp_kfunc_success *skel;
+	struct bpf_program *prog;
+	struct bpf_link *link = NULL;
+
+	skel = open_load_cgrp_kfunc_skel();
+	if (!ASSERT_OK_PTR(skel, "open_load_skel"))
+		return;
+
+	if (!ASSERT_OK(skel->bss->err, "pre_mkdir_err"))
+		goto cleanup;
+
+	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+		goto cleanup;
+
+	link = bpf_program__attach(prog);
+	if (!ASSERT_OK_PTR(link, "attached_link"))
+		goto cleanup;
+
+	/* Nothing should have fired before we create the test cgroup. */
+	ASSERT_EQ(skel->bss->invocations, 0, "pre_rmdir_count");
+	if (!ASSERT_OK(mkdir_rm_test_dir(), "cgrp_mkdir"))
+		goto cleanup;
+
+	/* The mkdir must have invoked the prog exactly once, with no error. */
+	ASSERT_EQ(skel->bss->invocations, 1, "post_rmdir_count");
+	ASSERT_OK(skel->bss->err, "post_rmdir_err");
+
+cleanup:
+	bpf_link__destroy(link);
+	cgrp_kfunc_success__destroy(skel);
+}
+
+/* Programs in cgrp_kfunc_success.c that are expected to load and run. */
+static const char * const success_tests[] = {
+	"test_cgrp_acquire_release_argument",
+	"test_cgrp_acquire_leave_in_map",
+	"test_cgrp_xchg_release",
+	"test_cgrp_get_release",
+};
+
+/* Programs in cgrp_kfunc_failure.c: each is loaded alone and must be
+ * rejected by the verifier with the given log message.
+ */
+static struct {
+	const char *prog_name;
+	const char *expected_err_msg;
+} failure_tests[] = {
+	{"cgrp_kfunc_acquire_untrusted", "R1 must be referenced or trusted"},
+	{"cgrp_kfunc_acquire_fp", "arg#0 pointer type STRUCT cgroup must point"},
+	{"cgrp_kfunc_acquire_unsafe_kretprobe", "reg type unsupported for arg#0 function"},
+	{"cgrp_kfunc_acquire_trusted_walked", "R1 must be referenced or trusted"},
+	{"cgrp_kfunc_acquire_null", "arg#0 pointer type STRUCT cgroup must point"},
+	{"cgrp_kfunc_acquire_unreleased", "Unreleased reference"},
+	{"cgrp_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
+	{"cgrp_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
+	{"cgrp_kfunc_get_null", "arg#0 expected pointer to map value"},
+	{"cgrp_kfunc_xchg_unreleased", "Unreleased reference"},
+	{"cgrp_kfunc_get_unreleased", "Unreleased reference"},
+	{"cgrp_kfunc_release_untrusted", "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket"},
+	{"cgrp_kfunc_release_fp", "arg#0 pointer type STRUCT cgroup must point"},
+	{"cgrp_kfunc_release_null", "arg#0 is ptr_or_null_ expected ptr_ or socket"},
+	{"cgrp_kfunc_release_unacquired", "release kernel function bpf_cgroup_release expects"},
+};
+
+/* Load the failure skeleton with only @prog_name autoloaded; loading must
+ * fail, and the verifier log must contain @expected_err_msg.
+ */
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+	LIBBPF_OPTS(bpf_object_open_opts, opts);
+	struct cgrp_kfunc_failure *skel;
+	int err, i;
+
+	opts.kernel_log_buf = obj_log_buf;
+	opts.kernel_log_size = log_buf_sz;
+	opts.kernel_log_level = 1;
+
+	skel = cgrp_kfunc_failure__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "cgrp_kfunc_failure__open_opts"))
+		goto cleanup;
+
+	for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+		struct bpf_program *prog;
+		const char *curr_name = failure_tests[i].prog_name;
+
+		prog = bpf_object__find_program_by_name(skel->obj, curr_name);
+		if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+			goto cleanup;
+
+		/* Autoload only the prog under test; all others stay unloaded. */
+		bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name));
+	}
+
+	err = cgrp_kfunc_failure__load(skel);
+	if (!ASSERT_ERR(err, "unexpected load success"))
+		goto cleanup;
+
+	if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+		fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+		fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+	}
+
+cleanup:
+	cgrp_kfunc_failure__destroy(skel);
+}
+
+/* Test entry point: sets up a cgroup environment, then runs every success
+ * and failure case as its own subtest.
+ */
+void test_cgrp_kfunc(void)
+{
+	int i, err;
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "cgrp_env_setup"))
+		goto cleanup;
+
+	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
+		if (!test__start_subtest(success_tests[i]))
+			continue;
+
+		run_success_test(success_tests[i]);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
+		if (!test__start_subtest(failure_tests[i].prog_name))
+			continue;
+
+		verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg);
+	}
+
+cleanup:
+	cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
new file mode 100644
index 000000000000..3f18def0e45c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_common.h
@@ -0,0 +1,71 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#ifndef _CGRP_KFUNC_COMMON_H
+#define _CGRP_KFUNC_COMMON_H
+
+#include <errno.h>
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+/* Map value holding a single referenced cgroup kptr. */
+struct __cgrps_kfunc_map_value {
+	struct cgroup __kptr_ref * cgrp;
+};
+
+/* Hash map keyed by cgroup self.id, storing one acquired cgroup kptr. */
+struct hash_map {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, int);
+	__type(value, struct __cgrps_kfunc_map_value);
+	__uint(max_entries, 1);
+} __cgrps_kfunc_map SEC(".maps");
+
+/* Cgroup kfuncs under test, resolved from kernel BTF at load time. */
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
+struct cgroup *bpf_cgroup_kptr_get(struct cgroup **pp) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+
+/* Look up the map entry for @cgrp, keyed by its self.id. Returns NULL if the
+ * id cannot be read or no entry exists.
+ */
+static inline struct __cgrps_kfunc_map_value *cgrps_kfunc_map_value_lookup(struct cgroup *cgrp)
+{
+	s32 id;
+	long status;
+
+	status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+	if (status)
+		return NULL;
+
+	return bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+}
+
+/* Insert a fresh map entry for @cgrp and store an acquired reference to it
+ * in the entry's kptr field. Returns 0 on success, a negative error
+ * otherwise; any reference displaced from the entry is released.
+ */
+static inline int cgrps_kfunc_map_insert(struct cgroup *cgrp)
+{
+	struct __cgrps_kfunc_map_value local, *v;
+	long status;
+	struct cgroup *acquired, *old;
+	s32 id;
+
+	status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+	if (status)
+		return status;
+
+	/* Reserve the slot first with a NULL kptr, then fill it in below. */
+	local.cgrp = NULL;
+	status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+	if (status)
+		return status;
+
+	v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+	if (!v) {
+		bpf_map_delete_elem(&__cgrps_kfunc_map, &id);
+		return -ENOENT;
+	}
+
+	acquired = bpf_cgroup_acquire(cgrp);
+	old = bpf_kptr_xchg(&v->cgrp, acquired);
+	if (old) {
+		/* Someone raced an entry in; drop the displaced reference. */
+		bpf_cgroup_release(old);
+		return -EEXIST;
+	}
+
+	return 0;
+}
+
+#endif /* _CGRP_KFUNC_COMMON_H */
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
new file mode 100644
index 000000000000..a1369b5ebcf8
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_failure.c
@@ -0,0 +1,260 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ *         TP_PROTO(struct cgroup *cgrp, const char *path),
+ *         TP_ARGS(cgrp, path)
+ */
+
+/* Insert @cgrp into the shared map, then return its map value so callers
+ * can reach the stored kptr field. NULL on any failure.
+ */
+static struct __cgrps_kfunc_map_value *insert_lookup_cgrp(struct cgroup *cgrp)
+{
+	int status;
+
+	status = cgrps_kfunc_map_insert(cgrp);
+	if (status)
+		return NULL;
+
+	return cgrps_kfunc_map_value_lookup(cgrp);
+}
+
+/* Expected verifier rejection: "R1 must be referenced or trusted". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_untrusted, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired;
+	struct __cgrps_kfunc_map_value *v;
+
+	v = insert_lookup_cgrp(cgrp);
+	if (!v)
+		return 0;
+
+	/* Can't invoke bpf_cgroup_acquire() on an untrusted pointer. */
+	acquired = bpf_cgroup_acquire(v->cgrp);
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 pointer type STRUCT cgroup must point". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_fp, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired, *stack_cgrp = (struct cgroup *)&path;
+
+	/* Can't invoke bpf_cgroup_acquire() on a random frame pointer. */
+	acquired = bpf_cgroup_acquire((struct cgroup *)&stack_cgrp);
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "reg type unsupported for arg#0 function". */
+SEC("kretprobe/cgroup_destroy_locked")
+int BPF_PROG(cgrp_kfunc_acquire_unsafe_kretprobe, struct cgroup *cgrp)
+{
+	struct cgroup *acquired;
+
+	/* Can't acquire an untrusted struct cgroup * pointer. */
+	acquired = bpf_cgroup_acquire(cgrp);
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "R1 must be referenced or trusted". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_trusted_walked, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired;
+
+	/* Can't invoke bpf_cgroup_acquire() on a pointer obtained from walking a trusted cgroup. */
+	acquired = bpf_cgroup_acquire(cgrp->old_dom_cgrp);
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 pointer type STRUCT cgroup must point". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_null, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired;
+
+	/* Can't invoke bpf_cgroup_acquire() on a NULL pointer. */
+	acquired = bpf_cgroup_acquire(NULL);
+	if (!acquired)
+		return 0;
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "Unreleased reference". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_acquire_unreleased, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired;
+
+	acquired = bpf_cgroup_acquire(cgrp);
+
+	/* Acquired cgroup is never released. */
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 expected pointer to map value". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_non_kptr_param, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr;
+
+	/* Cannot use bpf_cgroup_kptr_get() on a non-kptr, even on a valid cgroup. */
+	kptr = bpf_cgroup_kptr_get(&cgrp);
+	if (!kptr)
+		return 0;
+
+	bpf_cgroup_release(kptr);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 expected pointer to map value". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_non_kptr_acquired, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr, *acquired;
+
+	acquired = bpf_cgroup_acquire(cgrp);
+
+	/* Cannot use bpf_cgroup_kptr_get() on a non-map-value, even if the kptr was acquired. */
+	kptr = bpf_cgroup_kptr_get(&acquired);
+	bpf_cgroup_release(acquired);
+	if (!kptr)
+		return 0;
+
+	bpf_cgroup_release(kptr);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 expected pointer to map value". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_null, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr;
+
+	/* Cannot use bpf_cgroup_kptr_get() on a NULL pointer. */
+	kptr = bpf_cgroup_kptr_get(NULL);
+	if (!kptr)
+		return 0;
+
+	bpf_cgroup_release(kptr);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "Unreleased reference". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_xchg_unreleased, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr;
+	struct __cgrps_kfunc_map_value *v;
+
+	v = insert_lookup_cgrp(cgrp);
+	if (!v)
+		return 0;
+
+	kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+	if (!kptr)
+		return 0;
+
+	/* Kptr retrieved from map is never released. */
+
+	return 0;
+}
+
+/* Expected verifier rejection: "Unreleased reference". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_get_unreleased, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr;
+	struct __cgrps_kfunc_map_value *v;
+
+	v = insert_lookup_cgrp(cgrp);
+	if (!v)
+		return 0;
+
+	kptr = bpf_cgroup_kptr_get(&v->cgrp);
+	if (!kptr)
+		return 0;
+
+	/* Kptr acquired above is never released. */
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 is untrusted_ptr_or_null_ expected ptr_ or socket". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_untrusted, struct cgroup *cgrp, const char *path)
+{
+	struct __cgrps_kfunc_map_value *v;
+
+	v = insert_lookup_cgrp(cgrp);
+	if (!v)
+		return 0;
+
+	/* Can't invoke bpf_cgroup_release() on an untrusted pointer. */
+	bpf_cgroup_release(v->cgrp);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 pointer type STRUCT cgroup must point". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_fp, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired = (struct cgroup *)&path;
+
+	/* Cannot release random frame pointer. */
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "arg#0 is ptr_or_null_ expected ptr_ or socket". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_null, struct cgroup *cgrp, const char *path)
+{
+	struct __cgrps_kfunc_map_value local, *v;
+	long status;
+	struct cgroup *acquired, *old;
+	s32 id;
+
+	status = bpf_probe_read_kernel(&id, sizeof(id), &cgrp->self.id);
+	if (status)
+		return 0;
+
+	local.cgrp = NULL;
+	status = bpf_map_update_elem(&__cgrps_kfunc_map, &id, &local, BPF_NOEXIST);
+	if (status)
+		return status;
+
+	v = bpf_map_lookup_elem(&__cgrps_kfunc_map, &id);
+	if (!v)
+		return -ENOENT;
+
+	acquired = bpf_cgroup_acquire(cgrp);
+
+	/* old may be NULL here (the slot was just written with NULL above). */
+	old = bpf_kptr_xchg(&v->cgrp, acquired);
+
+	/* old cannot be passed to bpf_cgroup_release() without a NULL check. */
+	bpf_cgroup_release(old);
+
+	return 0;
+}
+
+/* Expected verifier rejection: "release kernel function bpf_cgroup_release expects". */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(cgrp_kfunc_release_unacquired, struct cgroup *cgrp, const char *path)
+{
+	/* Cannot release trusted cgroup pointer which was not acquired. */
+	bpf_cgroup_release(cgrp);
+
+	return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
new file mode 100644
index 000000000000..9f4569f7598b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/cgrp_kfunc_success.c
@@ -0,0 +1,125 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+#include "cgrp_kfunc_common.h"
+
+char _license[] SEC("license") = "GPL";
+
+int err, pid, invocations;
+
+/* Prototype for all of the program trace events below:
+ *
+ * TRACE_EVENT(cgroup_mkdir,
+ *         TP_PROTO(struct cgroup *cgrp, const char *path),
+ *         TP_ARGS(cgrp, path)
+ */
+
+/* Return true iff the tracepoint fired in the test process (pid is set by
+ * the userspace runner before load); each match bumps the invocations
+ * counter the runner checks.
+ */
+static bool is_test_kfunc_task(void)
+{
+	int cur_pid = bpf_get_current_pid_tgid() >> 32;
+	bool same = pid == cur_pid;
+
+	if (same)
+		__sync_fetch_and_add(&invocations, 1);
+
+	return same;
+}
+
+/* Acquire and immediately release a reference on the tracepoint's cgroup
+ * argument.
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_release_argument, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *acquired;
+
+	if (!is_test_kfunc_task())
+		return 0;
+
+	acquired = bpf_cgroup_acquire(cgrp);
+	bpf_cgroup_release(acquired);
+
+	return 0;
+}
+
+/* Store an acquired cgroup kptr in the map; the map then owns the
+ * reference. Sets err on insertion failure.
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_acquire_leave_in_map, struct cgroup *cgrp, const char *path)
+{
+	long status;
+
+	if (!is_test_kfunc_task())
+		return 0;
+
+	status = cgrps_kfunc_map_insert(cgrp);
+	if (status)
+		err = 1;
+
+	return 0;
+}
+
+/* Insert the cgroup into the map, xchg the kptr back out, and release it.
+ * err encodes which step failed (1=insert, 2=lookup, 3=xchg).
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_xchg_release, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr;
+	struct __cgrps_kfunc_map_value *v;
+	long status;
+
+	if (!is_test_kfunc_task())
+		return 0;
+
+	status = cgrps_kfunc_map_insert(cgrp);
+	if (status) {
+		err = 1;
+		return 0;
+	}
+
+	v = cgrps_kfunc_map_value_lookup(cgrp);
+	if (!v) {
+		err = 2;
+		return 0;
+	}
+
+	kptr = bpf_kptr_xchg(&v->cgrp, NULL);
+	if (!kptr) {
+		err = 3;
+		return 0;
+	}
+
+	bpf_cgroup_release(kptr);
+
+	return 0;
+}
+
+/* Insert the cgroup into the map, take a new reference from the stored kptr
+ * with bpf_cgroup_kptr_get(), and release it. err encodes which step failed
+ * (1=insert, 2=lookup, 3=kptr_get).
+ */
+SEC("tp_btf/cgroup_mkdir")
+int BPF_PROG(test_cgrp_get_release, struct cgroup *cgrp, const char *path)
+{
+	struct cgroup *kptr;
+	struct __cgrps_kfunc_map_value *v;
+	long status;
+
+	if (!is_test_kfunc_task())
+		return 0;
+
+	status = cgrps_kfunc_map_insert(cgrp);
+	if (status) {
+		err = 1;
+		return 0;
+	}
+
+	v = cgrps_kfunc_map_value_lookup(cgrp);
+	if (!v) {
+		err = 2;
+		return 0;
+	}
+
+	kptr = bpf_cgroup_kptr_get(&v->cgrp);
+	if (!kptr) {
+		err = 3;
+		return 0;
+	}
+
+	bpf_cgroup_release(kptr);
+
+	return 0;
+}