new file mode 100644
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include "rbtree_map.skel.h"
+#include "rbtree_map_fail.skel.h"
+#include "rbtree_map_load_fail.skel.h"
+
+static char obj_log_buf[1048576];
+static size_t log_buf_sz = sizeof(obj_log_buf); /* 1 MiB */
+
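+/* Each entry pairs a prog name with a substring expected in the
+ * verifier log when loading that prog fails.
+ */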
+static struct {
+ const char *prog_name;
+ const char *expected_err_msg;
+} rbtree_prog_load_fail_tests[] = {
+ {"rb_node__field_store", "only read is supported"},
+ {"rb_node__alloc_no_add", "Unreleased reference id=2 alloc_insn=3"},
+ {"rb_node__two_alloc_one_add", "Unreleased reference id=2 alloc_insn=3"},
+ {"rb_node__remove_no_free", "Unreleased reference id=5 alloc_insn=28"},
+ {"rb_tree__add_wrong_type", "rbtree: R2 is of type task_struct but node_data is expected"},
+ {"rb_tree__conditional_release_helper_usage",
+ "R2 type=ptr_cond_rel_ expected=ptr_"},
+};
+
+void test_rbtree_map_load_fail(void)
+{
+ struct rbtree_map_load_fail *skel;
+
+ skel = rbtree_map_load_fail__open_and_load();
+ if (!ASSERT_ERR_PTR(skel, "rbtree_map_load_fail__open_and_load"))
+ rbtree_map_load_fail__destroy(skel);
+}
+
+static void verify_fail(const char *prog_name, const char *expected_err_msg)
+{
+ LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct rbtree_map_fail *skel;
+ struct bpf_program *prog;
+ int err;
+
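+ /* Route the verifier log into obj_log_buf so it can be searched */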
+ opts.kernel_log_buf = obj_log_buf;
+ opts.kernel_log_size = log_buf_sz;
+ opts.kernel_log_level = 1;
+
+ skel = rbtree_map_fail__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "rbtree_map_fail__open_opts"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, prog_name);
+ if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
+ goto cleanup;
+
+ bpf_program__set_autoload(prog, true);
+ err = rbtree_map_fail__load(skel);
+ if (!ASSERT_ERR(err, "unexpected load success"))
+ goto cleanup;
+
+ if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) {
+ fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg);
+ fprintf(stderr, "Verifier output: %s\n", obj_log_buf);
+ }
+
+cleanup:
+ rbtree_map_fail__destroy(skel);
+}
+
+void test_rbtree_map_alloc_node__size_too_small(void)
+{
+ struct rbtree_map_fail *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err;
+
+ skel = rbtree_map_fail__open();
+ if (!ASSERT_OK_PTR(skel, "rbtree_map_fail__open"))
+ goto cleanup;
+
+ prog = skel->progs.alloc_node__size_too_small;
+ bpf_program__set_autoload(prog, true);
+
+ err = rbtree_map_fail__load(skel);
+ if (!ASSERT_OK(err, "unexpected load fail"))
+ goto cleanup;
+
+ link = bpf_program__attach(skel->progs.alloc_node__size_too_small);
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto cleanup;
+
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->size_too_small__alloc_fail, 1, "alloc_fail");
+
+ bpf_link__destroy(link);
+cleanup:
+ rbtree_map_fail__destroy(skel);
+}
+
+void test_rbtree_map_add_node__no_lock(void)
+{
+ struct rbtree_map_fail *skel;
+ struct bpf_program *prog;
+ struct bpf_link *link;
+ int err;
+
+ skel = rbtree_map_fail__open();
+ if (!ASSERT_OK_PTR(skel, "rbtree_map_fail__open"))
+ goto cleanup;
+
+ prog = skel->progs.add_node__no_lock;
+ bpf_program__set_autoload(prog, true);
+
+ err = rbtree_map_fail__load(skel);
+ if (!ASSERT_OK(err, "unexpected load fail"))
+ goto cleanup;
+
+ link = bpf_program__attach(skel->progs.add_node__no_lock);
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto cleanup;
+
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->no_lock_add__fail, 1, "add_fail");
+
+ bpf_link__destroy(link);
+cleanup:
+ rbtree_map_fail__destroy(skel);
+}
+
+void test_rbtree_map_prog_load_fail(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rbtree_prog_load_fail_tests); i++) {
+ if (!test__start_subtest(rbtree_prog_load_fail_tests[i].prog_name))
+ continue;
+
+ verify_fail(rbtree_prog_load_fail_tests[i].prog_name,
+ rbtree_prog_load_fail_tests[i].expected_err_msg);
+ }
+}
+
+void test_rbtree_map(void)
+{
+ struct rbtree_map *skel;
+ struct bpf_link *link;
+
+ skel = rbtree_map__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "rbtree_map__open_and_load"))
+ goto cleanup;
+
+ link = bpf_program__attach(skel->progs.check_rbtree);
+ if (!ASSERT_OK_PTR(link, "link"))
+ goto cleanup;
+
+ for (int i = 0; i < 100; i++)
+ syscall(SYS_getpgid);
+
+ ASSERT_EQ(skel->bss->calls, 100, "calls_equal");
+
+ bpf_link__destroy(link);
+cleanup:
+ rbtree_map__destroy(skel);
+}
new file mode 100644
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct node_data {
+ struct rb_node node;
+ __u32 one;
+ __u32 two;
+};
+
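+/* rbtree map whose value type must embed the struct rb_node used for
+ * linkage; rbtree_map_load_fail.c exercises rejection of a value type
+ * without one.
+ */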
+struct {
+ __uint(type, BPF_MAP_TYPE_RBTREE);
+ __type(value, struct node_data);
+} rbtree SEC(".maps");
+
+long calls;
+
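+/* Order nodes ascending by ->one */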
+static bool less(struct rb_node *a, const struct rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->one < node_b->one;
+}
+
+// Key is a full struct node_data
+static int cmp(const void *key, const struct rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(key, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_b->one - node_a->one;
+}
+
+// Key = just node_data.one
+static int cmp2(const void *key, const struct rb_node *b)
+{
+ __u32 one;
+ struct node_data *node_b;
+
+ one = *(__u32 *)key;
+ node_b = container_of(b, struct node_data, node);
+
+ return node_b->one - one;
+}
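+
+/* Example lookup with cmp2, keyed by a bare __u32 (sketch only; not
+ * exercised by check_rbtree below):
+ *
+ *	__u32 key = 42;
+ *	found = bpf_rbtree_find(&rbtree, &key, cmp2);
+ */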
+
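+/* Happy path: alloc a node, add it, find it again, walk the tree,
+ * then remove and free the node, taking the rbtree lock around the
+ * add, find/walk, and remove steps.
+ */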
+SEC("fentry/" SYS_PREFIX "sys_getpgid")
+int check_rbtree(void *ctx)
+{
+ struct node_data *node, *found, *ret;
+ struct node_data search;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+
+ node->one = calls;
+ node->two = 6;
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+
+ ret = (struct node_data *)bpf_rbtree_add(&rbtree, node, less);
+ if (!ret) {
+ bpf_rbtree_free_node(&rbtree, node);
+ goto unlock_ret;
+ }
+
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+
+ search.one = calls;
+ found = (struct node_data *)bpf_rbtree_find(&rbtree, &search, cmp);
+ if (!found)
+ goto unlock_ret;
+
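+ /* Walk the tree just to exercise bpf_rbtree_first/next; node_ct is
+ * otherwise unused
+ */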
+ int node_ct = 0;
+ struct node_data *iter = (struct node_data *)bpf_rbtree_first(&rbtree);
+
+ while (iter) {
+ node_ct++;
+ iter = (struct node_data *)bpf_rbtree_next(&rbtree, iter);
+ }
+
+ ret = (struct node_data *)bpf_rbtree_remove(&rbtree, found);
+ if (!ret)
+ goto unlock_ret;
+
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+
+ bpf_rbtree_free_node(&rbtree, ret);
+
+ __sync_fetch_and_add(&calls, 1);
+ return 0;
+
+unlock_ret:
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
new file mode 100644
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+struct node_data {
+ struct rb_node node;
+ __u32 one;
+ __u32 two;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_RBTREE);
+ __type(value, struct node_data);
+} rbtree SEC(".maps");
+
+long calls;
+
+static bool less(struct rb_node *a, const struct rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(a, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_a->one < node_b->one;
+}
+
+// Key is a full struct node_data
+static int cmp(const void *key, const struct rb_node *b)
+{
+ struct node_data *node_a;
+ struct node_data *node_b;
+
+ node_a = container_of(key, struct node_data, node);
+ node_b = container_of(b, struct node_data, node);
+
+ return node_b->one - node_a->one;
+}
+
+long size_too_small__alloc_fail;
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int alloc_node__size_too_small(void *ctx)
+{
+ struct node_data *node, *ret;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(char));
+ if (!node) {
+ size_too_small__alloc_fail++;
+ return 0;
+ }
+
+ /* Never reached: the undersized alloc above must fail */
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+ node->one = 1;
+ ret = bpf_rbtree_add(&rbtree, node, less);
+ if (!ret) {
+ bpf_rbtree_free_node(&rbtree, node);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+ return 0;
+}
+
+long no_lock_add__fail;
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int add_node__no_lock(void *ctx)
+{
+ struct node_data *node, *ret;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+
+ node->one = 1;
+ ret = bpf_rbtree_add(&rbtree, node, less);
+ if (!ret) {
+ /* Always reached: rbtree_add must fail because no lock is held */
+ no_lock_add__fail++;
+ bpf_rbtree_free_node(&rbtree, node);
+ }
+
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int rb_node__field_store(void *ctx)
+{
+ struct node_data *node;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+
+ /* Only rbtree_map helpers can modify rb_node field */
+ node->node.rb_left = NULL;
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int rb_node__alloc_no_add(void *ctx)
+{
+ struct node_data *node;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+ /* The node alloc'd above is never added to the rbtree. It must be
+ * added or free'd before prog terminates.
+ */
+
+ node->one = 42;
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int rb_node__two_alloc_one_add(void *ctx)
+{
+ struct node_data *node, *ret;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+ node->one = 1;
+ /* The node alloc'd above is never added to the rbtree. It must be
+ * added or free'd before prog terminates.
+ */
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+ node->one = 42;
+
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+
+ ret = bpf_rbtree_add(&rbtree, node, less);
+ if (!ret) {
+ bpf_rbtree_free_node(&rbtree, node);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int rb_node__remove_no_free(void *ctx)
+{
+ struct node_data *node, *ret;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+ node->one = 42;
+
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+
+ ret = bpf_rbtree_add(&rbtree, node, less);
+ if (!ret) {
+ bpf_rbtree_free_node(&rbtree, node);
+ goto unlock_ret;
+ }
+
+ ret = bpf_rbtree_remove(&rbtree, ret);
+ if (!ret)
+ goto unlock_ret;
+ /* At this point we've successfully acquired a reference from
+ * bpf_rbtree_remove. It must be released via rbtree_add or
+ * rbtree_free_node before prog terminates.
+ */
+
+unlock_ret:
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int rb_tree__add_wrong_type(void *ctx)
+{
+ /* Can't add a task_struct to rbtree */
+ struct task_struct *task;
+ struct node_data *ret;
+
+ task = bpf_get_current_task_btf();
+
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+
+ ret = bpf_rbtree_add(&rbtree, task, less);
+ /* Verifier should fail at bpf_rbtree_add, so don't bother handling
+ * failure.
+ */
+
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+ return 0;
+}
+
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+int rb_tree__conditional_release_helper_usage(void *ctx)
+{
+ struct node_data *node, *ret;
+
+ node = bpf_rbtree_alloc_node(&rbtree, sizeof(struct node_data));
+ if (!node)
+ return 0;
+ node->one = 42;
+
+ bpf_rbtree_lock(bpf_rbtree_get_lock(&rbtree));
+
+ ret = bpf_rbtree_add(&rbtree, node, less);
+ /* Verifier should fail when trying to use CONDITIONAL_RELEASE
+ * type in a helper
+ */
+ bpf_rbtree_free_node(&rbtree, node);
+ if (!ret) {
+ bpf_rbtree_free_node(&rbtree, node);
+ goto unlock_ret;
+ }
+
+unlock_ret:
+ bpf_rbtree_unlock(bpf_rbtree_get_lock(&rbtree));
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
new file mode 100644
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct node_data_no_rb_node {
+ __u64 one;
+ __u64 two;
+ __u64 three;
+ __u64 four;
+ __u64 five;
+ __u64 six;
+ __u64 seven;
+};
+
+/* Should fail because value struct has no rb_node */
+struct {
+ __uint(type, BPF_MAP_TYPE_RBTREE);
+ __type(value, struct node_data_no_rb_node);
+} rbtree_fail_no_rb_node SEC(".maps");
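+
+/* For contrast, a value type that embeds a struct rb_node, e.g.
+ *
+ *	struct node_data_ok {
+ *		struct rb_node node;
+ *		__u64 one;
+ *	};
+ *
+ * would be expected to load (sketch; node_data_ok is hypothetical and
+ * not part of this test).
+ */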
+
+char _license[] SEC("license") = "GPL";
Add tests demonstrating happy path of rbtree map usage as well as
exercising numerous failure paths and conditions. Structure of failing
test runner is based on dynptr tests.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 .../selftests/bpf/prog_tests/rbtree_map.c     | 164 ++++++++++++
 .../testing/selftests/bpf/progs/rbtree_map.c  | 111 ++++++++
 .../selftests/bpf/progs/rbtree_map_fail.c     | 236 ++++++++++++++++++
 .../bpf/progs/rbtree_map_load_fail.c          |  24 ++
 4 files changed, 535 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/rbtree_map.c
 create mode 100644 tools/testing/selftests/bpf/progs/rbtree_map.c
 create mode 100644 tools/testing/selftests/bpf/progs/rbtree_map_fail.c
 create mode 100644 tools/testing/selftests/bpf/progs/rbtree_map_load_fail.c
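
The new tests can be run via the usual selftests flow; the invocation
below assumes a built tools/testing/selftests/bpf tree:

  cd tools/testing/selftests/bpf
  make
  ./test_progs -t rbtree_map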