diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage_excl_cache.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage_excl_cache.c
new file mode 100644
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage_excl_cache.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitmap.h>
+#include <test_progs.h>
+
+#include "local_storage_excl_cache.skel.h"
+#include "local_storage_excl_cache_fail.skel.h"
+
+void test_test_local_storage_excl_cache(void)
+{
+ __u64 cache_idx_exclusive, cache_idx_exclusive_expected;
+ struct local_storage_excl_cache_fail *skel_fail = NULL;
+ struct local_storage_excl_cache *skel = NULL;
+ __u16 cache_size, i;
+ int err;
+
+ skel_fail = local_storage_excl_cache_fail__open_and_load();
+ ASSERT_ERR_PTR(skel_fail, "excl_cache_fail load should fail");
+ local_storage_excl_cache_fail__destroy(skel_fail);
+
+ skel = local_storage_excl_cache__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "excl_cache load should succeed"))
+ goto cleanup;
+
+ cache_size = skel->data->__BPF_LOCAL_STORAGE_CACHE_SIZE;
+
+ err = local_storage_excl_cache__attach(skel);
+ if (!ASSERT_OK(err, "excl_cache__attach"))
+ goto cleanup;
+
+ /* trigger the sys_enter tracepoint so the BPF prog runs */
+ usleep(1);
+ cache_idx_exclusive = skel->data->out__cache_bitmap;
+ cache_idx_exclusive_expected = 0;
+ for (i = 0; i < cache_size; i++)
+ cache_idx_exclusive_expected |= (1U << i);
+
+ if (!ASSERT_EQ(cache_idx_exclusive & cache_idx_exclusive_expected,
+ cache_idx_exclusive_expected, "excl cache bitmap should be full"))
+ goto cleanup;
+
+ usleep(1);
+ for (i = 0; i < cache_size; i++)
+ if (!ASSERT_EQ(skel->data->out__cache_smaps[i],
+ skel->data->out__declared_smaps[i],
+ "cached map not equal"))
+ goto cleanup;
+
+cleanup:
+ local_storage_excl_cache__destroy(skel);
+}
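
The bitmap check above expects every one of the low cache_size bits to be set while ignoring any higher bits. Below is a standalone sketch of that expectation, assuming the 16-slot cache size the BPF prog further down hardcodes; it is illustration only, not part of the patch.

/* Illustration only (not part of the patch): the expected "all exclusive
 * slots claimed" mask for a 16-slot cache, and the check the test performs.
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t bitmap = 0xdeadffff;		/* pretend value read from the kernel */
	uint64_t expected = 0;
	int i, cache_size = 16;			/* assumed BPF_LOCAL_STORAGE_CACHE_SIZE */

	for (i = 0; i < cache_size; i++)	/* same loop as the test above */
		expected |= (1ULL << i);

	assert(expected == 0xffff);
	/* The test passes as long as all low cache_size bits are set; any
	 * higher bits are ignored by the mask-and-compare.
	 */
	assert((bitmap & expected) == expected);
	return 0;
}
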
diff --git a/tools/testing/selftests/bpf/progs/local_storage_excl_cache.c b/tools/testing/selftests/bpf/progs/local_storage_excl_cache.c
new file mode 100644
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_excl_cache.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define make_task_local_excl_map(name, num) \
+struct { \
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE); \
+ __uint(map_flags, BPF_F_NO_PREALLOC); \
+ __type(key, int); \
+ __type(value, __u32); \
+ __uint(map_extra, BPF_LOCAL_STORAGE_FORCE_CACHE); \
+} name ## num SEC(".maps");
+
+#define make_task_local_map(name, num) \
+struct { \
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE); \
+ __uint(map_flags, BPF_F_NO_PREALLOC); \
+ __type(key, int); \
+ __type(value, __u32); \
+} name ## num SEC(".maps");
+
+#define task_storage_get_excl(map, num) \
+({ \
+ bpf_task_storage_get(&map ## num, task, 0, BPF_LOCAL_STORAGE_GET_F_CREATE); \
+ bpf_probe_read_kernel(&out__cache_smaps[num], \
+ sizeof(void *), \
+ &task->bpf_storage->cache[num]->smap); \
+ out__declared_smaps[num] = &map ## num; \
+})
+
+/* must match define in bpf_local_storage.h */
+#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
+
+/* Try adding BPF_LOCAL_STORAGE_CACHE_SIZE task_storage maps w/ exclusive
+ * cache slot
+ */
+make_task_local_excl_map(task_storage_map, 0);
+make_task_local_excl_map(task_storage_map, 1);
+make_task_local_excl_map(task_storage_map, 2);
+make_task_local_excl_map(task_storage_map, 3);
+make_task_local_excl_map(task_storage_map, 4);
+make_task_local_excl_map(task_storage_map, 5);
+make_task_local_excl_map(task_storage_map, 6);
+make_task_local_excl_map(task_storage_map, 7);
+make_task_local_excl_map(task_storage_map, 8);
+make_task_local_excl_map(task_storage_map, 9);
+make_task_local_excl_map(task_storage_map, 10);
+make_task_local_excl_map(task_storage_map, 11);
+make_task_local_excl_map(task_storage_map, 12);
+make_task_local_excl_map(task_storage_map, 13);
+make_task_local_excl_map(task_storage_map, 14);
+make_task_local_excl_map(task_storage_map, 15);
+
+make_task_local_map(task_storage_map, 16);
+
+extern const void task_cache __ksym;
+__u64 __BPF_LOCAL_STORAGE_CACHE_SIZE = BPF_LOCAL_STORAGE_CACHE_SIZE;
+__u64 out__cache_bitmap = -1;
+void *out__cache_smaps[BPF_LOCAL_STORAGE_CACHE_SIZE] = { (void *)-1 };
+void *out__declared_smaps[BPF_LOCAL_STORAGE_CACHE_SIZE] = { (void *)-1 };
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ struct task_struct *task = bpf_get_current_task_btf();
+ __u32 *ptr;
+
+ bpf_probe_read_kernel(&out__cache_bitmap, sizeof(out__cache_bitmap),
+ &task_cache +
+ offsetof(struct bpf_local_storage_cache, idx_exclusive));
+
+ /* Get all BPF_LOCAL_STORAGE_CACHE_SIZE exclusive-cache maps into cache,
+ * and one that shouldn't be cached
+ */
+ task_storage_get_excl(task_storage_map, 0);
+ task_storage_get_excl(task_storage_map, 1);
+ task_storage_get_excl(task_storage_map, 2);
+ task_storage_get_excl(task_storage_map, 3);
+ task_storage_get_excl(task_storage_map, 4);
+ task_storage_get_excl(task_storage_map, 5);
+ task_storage_get_excl(task_storage_map, 6);
+ task_storage_get_excl(task_storage_map, 7);
+ task_storage_get_excl(task_storage_map, 8);
+ task_storage_get_excl(task_storage_map, 9);
+ task_storage_get_excl(task_storage_map, 10);
+ task_storage_get_excl(task_storage_map, 11);
+ task_storage_get_excl(task_storage_map, 12);
+ task_storage_get_excl(task_storage_map, 13);
+ task_storage_get_excl(task_storage_map, 14);
+ task_storage_get_excl(task_storage_map, 15);
+
+ bpf_task_storage_get(&task_storage_map16, task, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+
+ return 0;
+}
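
The exclusive-slot request lives in map_extra, so it does not have to be baked into the map definition as above. Here is a minimal userspace sketch of setting it at open time with libbpf's bpf_map__set_map_extra(); it assumes the skeleton generated from this prog and the BPF_LOCAL_STORAGE_FORCE_CACHE value these selftests rely on (expected to come from the rest of this series), and is not part of the patch.

/* Sketch only: assumes the local_storage_excl_cache skeleton from this patch
 * and the BPF_LOCAL_STORAGE_FORCE_CACHE value it relies on.
 */
#include <bpf/libbpf.h>
#include "local_storage_excl_cache.skel.h"

static struct local_storage_excl_cache *open_load_forced_cache(void)
{
	struct local_storage_excl_cache *skel;

	skel = local_storage_excl_cache__open();
	if (!skel)
		return NULL;

	/* Request an exclusive cache slot for one map at open time instead of
	 * (or in addition to) __uint(map_extra, ...) in the map definition.
	 */
	bpf_map__set_map_extra(skel->maps.task_storage_map0,
			       BPF_LOCAL_STORAGE_FORCE_CACHE);

	if (local_storage_excl_cache__load(skel)) {
		local_storage_excl_cache__destroy(skel);
		return NULL;
	}
	return skel;
}
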
diff --git a/tools/testing/selftests/bpf/progs/local_storage_excl_cache_fail.c b/tools/testing/selftests/bpf/progs/local_storage_excl_cache_fail.c
new file mode 100644
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage_excl_cache_fail.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define make_task_local_excl_map(name, num) \
+struct { \
+ __uint(type, BPF_MAP_TYPE_TASK_STORAGE); \
+ __uint(map_flags, BPF_F_NO_PREALLOC); \
+ __type(key, int); \
+ __type(value, __u32); \
+ __uint(map_extra, BPF_LOCAL_STORAGE_FORCE_CACHE); \
+} name ## num SEC(".maps");
+
+/* Try adding BPF_LOCAL_STORAGE_CACHE_SIZE+1 task_storage maps w/ exclusive
+ * cache slot */
+make_task_local_excl_map(task_storage_map, 0);
+make_task_local_excl_map(task_storage_map, 1);
+make_task_local_excl_map(task_storage_map, 2);
+make_task_local_excl_map(task_storage_map, 3);
+make_task_local_excl_map(task_storage_map, 4);
+make_task_local_excl_map(task_storage_map, 5);
+make_task_local_excl_map(task_storage_map, 6);
+make_task_local_excl_map(task_storage_map, 7);
+make_task_local_excl_map(task_storage_map, 8);
+make_task_local_excl_map(task_storage_map, 9);
+make_task_local_excl_map(task_storage_map, 10);
+make_task_local_excl_map(task_storage_map, 11);
+make_task_local_excl_map(task_storage_map, 12);
+make_task_local_excl_map(task_storage_map, 13);
+make_task_local_excl_map(task_storage_map, 14);
+make_task_local_excl_map(task_storage_map, 15);
+make_task_local_excl_map(task_storage_map, 16);
Validate local_storage exclusive caching functionality:

  * Adding >BPF_LOCAL_STORAGE_CACHE_SIZE task_storage maps w/
    BPF_LOCAL_STORAGE_FORCE_CACHE results in failure to load the program, as
    there are no free cache slots left to claim.

  * Adding BPF_LOCAL_STORAGE_CACHE_SIZE task_storage maps w/ FORCE_CACHE
    succeeds and results in a fully-populated idx_exclusive bitmap for the
    cache. After the first bpf_task_storage_get call for each map, the map's
    local storage data is in its cache slot. Subsequent bpf_task_storage_get
    calls to non-exclusive-cached maps don't replace exclusive-cached maps.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 .../test_local_storage_excl_cache.c           |  52 ++++++++++
 .../bpf/progs/local_storage_excl_cache.c      | 100 ++++++++++++++++++
 .../bpf/progs/local_storage_excl_cache_fail.c |  36 +++++++
 3 files changed, 188 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/test_local_storage_excl_cache.c
 create mode 100644 tools/testing/selftests/bpf/progs/local_storage_excl_cache.c
 create mode 100644 tools/testing/selftests/bpf/progs/local_storage_excl_cache_fail.c
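
For readers who want the feature in isolation rather than spread across 17 map definitions, the following is a distilled BPF-side sketch based on the selftest progs above; the map and prog names are illustrative, and BPF_LOCAL_STORAGE_FORCE_CACHE is assumed to be provided by the rest of this series.

/* Distilled sketch (illustrative names, not part of the patch). */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u32);
	/* Claim one of the BPF_LOCAL_STORAGE_CACHE_SIZE cache slots
	 * exclusively; loading fails if none are left to claim.
	 */
	__uint(map_extra, BPF_LOCAL_STORAGE_FORCE_CACHE);
} excl_map SEC(".maps");

SEC("raw_tp/sys_enter")
int excl_map_user(const void *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();

	/* The first get for a task lands this map's storage in its exclusive
	 * slot; later gets on non-exclusive maps won't evict it.
	 */
	bpf_task_storage_get(&excl_map, task, 0, BPF_LOCAL_STORAGE_GET_F_CREATE);
	return 0;
}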