diff mbox series

[bpf-next,v3,3/3] selftests/bpf: Add tests for arena spin lock

Message ID 20250305011849.1168917-4-memxor@gmail.com (mailing list archive)
State Superseded
Delegated to: BPF
Headers show
Series Arena Spin Lock | expand

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-11 success Logs for aarch64-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-7 success Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-13 success Logs for s390x-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / GCC BPF
bpf/vmtest-bpf-next-VM_Test-18 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-19 success Logs for s390x-gcc / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-21 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-30 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 success Logs for x86_64-gcc / veristat-kernel / x86_64-gcc veristat_kernel
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for s390x-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-32 success Logs for x86_64-gcc / veristat-meta / x86_64-gcc veristat_meta
bpf/vmtest-bpf-next-VM_Test-12 success Logs for aarch64-gcc / veristat-meta
bpf/vmtest-bpf-next-VM_Test-15 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17-O2
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-39 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-40 success Logs for x86_64-llvm-17 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-17 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-43 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-44 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18-O2
bpf/vmtest-bpf-next-VM_Test-45 success Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-49 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-50 success Logs for x86_64-llvm-18 / veristat-kernel
bpf/vmtest-bpf-next-VM_Test-51 success Logs for x86_64-llvm-18 / veristat-meta
bpf/vmtest-bpf-next-VM_Test-22 success Logs for x86_64-gcc / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-8 success Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 success Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-38 success Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / GCC BPF / GCC BPF
bpf/vmtest-bpf-next-VM_Test-47 success Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-46 success Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-48 success Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 0 this patch: 0
netdev/build_tools success Errors and warnings before: 26 (+0) this patch: 26 (+0)
netdev/cc_maintainers warning 11 maintainers not CCed: jolsa@kernel.org kpsingh@kernel.org linux-kselftest@vger.kernel.org sdf@fomichev.me john.fastabend@gmail.com haoluo@google.com mykolal@fb.com martin.lau@linux.dev shuah@kernel.org song@kernel.org yonghong.song@linux.dev
netdev/build_clang success Errors and warnings before: 1 this patch: 1
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch fail CHECK: Alignment should match open parenthesis ERROR: do not use assignment in if condition WARNING: Missing a blank line after declarations WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: do not add new typedefs WARNING: line length of 81 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns WARNING: void function return statements are not generally useful
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Kumar Kartikeya Dwivedi March 5, 2025, 1:18 a.m. UTC
Add some basic selftests for qspinlock built over BPF arena using
cond_break_label macro.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
---
 .../bpf/prog_tests/arena_spin_lock.c          | 102 ++++++++++++++++++
 .../selftests/bpf/progs/arena_spin_lock.c     |  51 +++++++++
 2 files changed, 153 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
 create mode 100644 tools/testing/selftests/bpf/progs/arena_spin_lock.c

Comments

Alexei Starovoitov March 5, 2025, 2:03 a.m. UTC | #1
On Tue, Mar 4, 2025 at 5:18 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> Add some basic selftests for qspinlock built over BPF arena using
> cond_break_label macro.
>
> Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> ---
>  .../bpf/prog_tests/arena_spin_lock.c          | 102 ++++++++++++++++++
>  .../selftests/bpf/progs/arena_spin_lock.c     |  51 +++++++++
>  2 files changed, 153 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
>  create mode 100644 tools/testing/selftests/bpf/progs/arena_spin_lock.c
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> new file mode 100644
> index 000000000000..2cc078ed1ddb
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> @@ -0,0 +1,102 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> +#include <test_progs.h>
> +#include <network_helpers.h>
> +#include <sys/sysinfo.h>
> +
> +struct qspinlock { int val; };
> +typedef struct qspinlock arena_spinlock_t;
> +
> +struct arena_qnode {
> +       unsigned long next;
> +       int count;
> +       int locked;
> +};
> +
> +#include "arena_spin_lock.skel.h"
> +
> +static long cpu;
> +int *counter;
> +
> +static void *spin_lock_thread(void *arg)
> +{
> +       int err, prog_fd = *(u32 *)arg;
> +       LIBBPF_OPTS(bpf_test_run_opts, topts,
> +               .data_in = &pkt_v4,
> +               .data_size_in = sizeof(pkt_v4),
> +               .repeat = 1,
> +       );

Why bother with 'tc' prog type?
Pick syscall type, and above will be shorter:
LIBBPF_OPTS(bpf_test_run_opts, opts);

> +       cpu_set_t cpuset;
> +
> +       CPU_ZERO(&cpuset);
> +       CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
> +       ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
> +
> +       while (*READ_ONCE(counter) <= 1000) {

READ_ONCE(*counter) ?

but why add this user->kernel switch overhead.
Use .repeat = 1000
one bpf_prog_test_run_opts()
and check at the end that *counter == 1000 ?

> +               err = bpf_prog_test_run_opts(prog_fd, &topts);
> +               if (!ASSERT_OK(err, "test_run err"))
> +                       break;
> +               if (!ASSERT_EQ((int)topts.retval, 0, "test_run retval"))
> +                       break;
> +       }
> +       pthread_exit(arg);
> +}
> +
> +static void test_arena_spin_lock_size(int size)
> +{
> +       LIBBPF_OPTS(bpf_test_run_opts, topts);
> +       struct arena_spin_lock *skel;
> +       pthread_t thread_id[16];
> +       int prog_fd, i, err;
> +       void *ret;
> +
> +       if (get_nprocs() < 2) {
> +               test__skip();
> +               return;
> +       }
> +
> +       skel = arena_spin_lock__open_and_load();
> +       if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
> +               return;
> +       if (skel->data->test_skip == 2) {
> +               test__skip();
> +               goto end;
> +       }
> +       counter = &skel->bss->counter;
> +       skel->bss->cs_count = size;
> +
> +       prog_fd = bpf_program__fd(skel->progs.prog);
> +       for (i = 0; i < 16; i++) {
> +               err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
> +               if (!ASSERT_OK(err, "pthread_create"))
> +                       goto end;
> +       }
> +
> +       for (i = 0; i < 16; i++) {
> +               if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
> +                       goto end;
> +               if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
> +                       goto end;
> +       }
> +end:
> +       arena_spin_lock__destroy(skel);
> +       return;
> +}
> +
> +void test_arena_spin_lock(void)
> +{
> +       if (test__start_subtest("arena_spin_lock_1"))
> +               test_arena_spin_lock_size(1);
> +       cpu = 0;
> +       if (test__start_subtest("arena_spin_lock_1000"))
> +               test_arena_spin_lock_size(1000);
> +       cpu = 0;
> +       if (test__start_subtest("arena_spin_lock_10000"))
> +               test_arena_spin_lock_size(10000);
> +       cpu = 0;
> +       if (test__start_subtest("arena_spin_lock_100000"))
> +               test_arena_spin_lock_size(100000);
> +       cpu = 0;
> +       if (test__start_subtest("arena_spin_lock_500000"))
> +               test_arena_spin_lock_size(500000);

Do 10k and 500k make a difference?
I suspect 1, 1k and 100k would cover the interesting range.

> +}
> diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
> new file mode 100644
> index 000000000000..3e8ce807e028
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
> @@ -0,0 +1,51 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> +#include <vmlinux.h>
> +#include <bpf/bpf_tracing.h>
> +#include <bpf/bpf_helpers.h>
> +#include "bpf_misc.h"
> +#include "bpf_arena_spin_lock.h"
> +
> +struct {
> +       __uint(type, BPF_MAP_TYPE_ARENA);
> +       __uint(map_flags, BPF_F_MMAPABLE);
> +       __uint(max_entries, 100); /* number of pages */
> +#ifdef __TARGET_ARCH_arm64
> +       __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
> +#else
> +       __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
> +#endif
> +} arena SEC(".maps");
> +
> +int cs_count;
> +
> +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
> +arena_spinlock_t __arena lock;
> +void *ptr;
> +int test_skip = 1;
> +#else
> +int test_skip = 2;
> +#endif
> +
> +int counter;
> +
> +SEC("tc")
> +int prog(void *ctx)
> +{
> +       int ret = -2;
> +
> +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
> +       unsigned long flags;
> +
> +       ptr = &arena;

Is it really necessary?

> +       if ((ret = arena_spin_lock_irqsave(&lock, flags)))
> +               return ret;
> +       WRITE_ONCE(counter, READ_ONCE(counter) + 1);
> +       bpf_repeat(cs_count);
> +       ret = 0;
> +       arena_spin_unlock_irqrestore(&lock, flags);
> +#endif
> +       return ret;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> --
> 2.47.1
>
Kumar Kartikeya Dwivedi March 5, 2025, 2:14 a.m. UTC | #2
On Wed, 5 Mar 2025 at 03:04, Alexei Starovoitov
<alexei.starovoitov@gmail.com> wrote:
>
> On Tue, Mar 4, 2025 at 5:18 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> >
> > Add some basic selftests for qspinlock built over BPF arena using
> > cond_break_label macro.
> >
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > ---
> >  .../bpf/prog_tests/arena_spin_lock.c          | 102 ++++++++++++++++++
> >  .../selftests/bpf/progs/arena_spin_lock.c     |  51 +++++++++
> >  2 files changed, 153 insertions(+)
> >  create mode 100644 tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> >  create mode 100644 tools/testing/selftests/bpf/progs/arena_spin_lock.c
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > new file mode 100644
> > index 000000000000..2cc078ed1ddb
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > @@ -0,0 +1,102 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> > +#include <test_progs.h>
> > +#include <network_helpers.h>
> > +#include <sys/sysinfo.h>
> > +
> > +struct qspinlock { int val; };
> > +typedef struct qspinlock arena_spinlock_t;
> > +
> > +struct arena_qnode {
> > +       unsigned long next;
> > +       int count;
> > +       int locked;
> > +};
> > +
> > +#include "arena_spin_lock.skel.h"
> > +
> > +static long cpu;
> > +int *counter;
> > +
> > +static void *spin_lock_thread(void *arg)
> > +{
> > +       int err, prog_fd = *(u32 *)arg;
> > +       LIBBPF_OPTS(bpf_test_run_opts, topts,
> > +               .data_in = &pkt_v4,
> > +               .data_size_in = sizeof(pkt_v4),
> > +               .repeat = 1,
> > +       );
>
> Why bother with 'tc' prog type?
> Pick syscall type, and above will be shorter:
> LIBBPF_OPTS(bpf_test_run_opts, opts);
>

Ack.

> > +       cpu_set_t cpuset;
> > +
> > +       CPU_ZERO(&cpuset);
> > +       CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
> > +       ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
> > +
> > +       while (*READ_ONCE(counter) <= 1000) {
>
> READ_ONCE(*counter) ?
>
> but why add this user->kernel switch overhead.
> Use .repeat = 1000
> one bpf_prog_test_run_opts()
> and check at the end that *counter == 1000 ?

Ok.

>
> > +               err = bpf_prog_test_run_opts(prog_fd, &topts);
> > +               if (!ASSERT_OK(err, "test_run err"))
> > +                       break;
> > +               if (!ASSERT_EQ((int)topts.retval, 0, "test_run retval"))
> > +                       break;
> > +       }
> > +       pthread_exit(arg);
> > +}
> > +
> > +static void test_arena_spin_lock_size(int size)
> > +{
> > +       LIBBPF_OPTS(bpf_test_run_opts, topts);
> > +       struct arena_spin_lock *skel;
> > +       pthread_t thread_id[16];
> > +       int prog_fd, i, err;
> > +       void *ret;
> > +
> > +       if (get_nprocs() < 2) {
> > +               test__skip();
> > +               return;
> > +       }
> > +
> > +       skel = arena_spin_lock__open_and_load();
> > +       if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
> > +               return;
> > +       if (skel->data->test_skip == 2) {
> > +               test__skip();
> > +               goto end;
> > +       }
> > +       counter = &skel->bss->counter;
> > +       skel->bss->cs_count = size;
> > +
> > +       prog_fd = bpf_program__fd(skel->progs.prog);
> > +       for (i = 0; i < 16; i++) {
> > +               err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
> > +               if (!ASSERT_OK(err, "pthread_create"))
> > +                       goto end;
> > +       }
> > +
> > +       for (i = 0; i < 16; i++) {
> > +               if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
> > +                       goto end;
> > +               if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
> > +                       goto end;
> > +       }
> > +end:
> > +       arena_spin_lock__destroy(skel);
> > +       return;
> > +}
> > +
> > +void test_arena_spin_lock(void)
> > +{
> > +       if (test__start_subtest("arena_spin_lock_1"))
> > +               test_arena_spin_lock_size(1);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_1000"))
> > +               test_arena_spin_lock_size(1000);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_10000"))
> > +               test_arena_spin_lock_size(10000);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_100000"))
> > +               test_arena_spin_lock_size(100000);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_500000"))
> > +               test_arena_spin_lock_size(500000);
>
> Do 10k and 500k make a difference?
> I suspect 1, 1k and 100k would cover the interesting range.

They do make a difference inside a VM, but not on bare-metal.
I can stick to three sizes though.

>
> > +}
> > diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
> > new file mode 100644
> > index 000000000000..3e8ce807e028
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
> > @@ -0,0 +1,51 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> > +#include <vmlinux.h>
> > +#include <bpf/bpf_tracing.h>
> > +#include <bpf/bpf_helpers.h>
> > +#include "bpf_misc.h"
> > +#include "bpf_arena_spin_lock.h"
> > +
> > +struct {
> > +       __uint(type, BPF_MAP_TYPE_ARENA);
> > +       __uint(map_flags, BPF_F_MMAPABLE);
> > +       __uint(max_entries, 100); /* number of pages */
> > +#ifdef __TARGET_ARCH_arm64
> > +       __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
> > +#else
> > +       __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
> > +#endif
> > +} arena SEC(".maps");
> > +
> > +int cs_count;
> > +
> > +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
> > +arena_spinlock_t __arena lock;
> > +void *ptr;
> > +int test_skip = 1;
> > +#else
> > +int test_skip = 2;
> > +#endif
> > +
> > +int counter;
> > +
> > +SEC("tc")
> > +int prog(void *ctx)
> > +{
> > +       int ret = -2;
> > +
> > +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
> > +       unsigned long flags;
> > +
> > +       ptr = &arena;
>
> Is it really necessary?

Probably a remnant from previous versions, but sometimes if you don't do this
it gives you an error saying addr_space_cast cannot be used in a
program with no associated arena.

Probably if it never sees the arena map reference anywhere in the
program. This is a way to do that.
But in this case I dropped it and it works.

>
> > +       if ((ret = arena_spin_lock_irqsave(&lock, flags)))
> > +               return ret;
> > +       WRITE_ONCE(counter, READ_ONCE(counter) + 1);
> > +       bpf_repeat(cs_count);
> > +       ret = 0;
> > +       arena_spin_unlock_irqrestore(&lock, flags);
> > +#endif
> > +       return ret;
> > +}
> > +
> > +char _license[] SEC("license") = "GPL";
> > --
> > 2.47.1
> >
Kumar Kartikeya Dwivedi March 5, 2025, 2:24 a.m. UTC | #3
On Wed, 5 Mar 2025 at 03:14, Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> On Wed, 5 Mar 2025 at 03:04, Alexei Starovoitov
> <alexei.starovoitov@gmail.com> wrote:
> >
> > On Tue, Mar 4, 2025 at 5:18 PM Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
> > >
> > > Add some basic selftests for qspinlock built over BPF arena using
> > > cond_break_label macro.
> > >
> > > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
> > > ---
> > >  .../bpf/prog_tests/arena_spin_lock.c          | 102 ++++++++++++++++++
> > >  .../selftests/bpf/progs/arena_spin_lock.c     |  51 +++++++++
> > >  2 files changed, 153 insertions(+)
> > >  create mode 100644 tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > >  create mode 100644 tools/testing/selftests/bpf/progs/arena_spin_lock.c
> > >
> > > diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > > new file mode 100644
> > > index 000000000000..2cc078ed1ddb
> > > --- /dev/null
> > > +++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > > @@ -0,0 +1,102 @@
> > > +// SPDX-License-Identifier: GPL-2.0
> > > +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> > > +#include <test_progs.h>
> > > +#include <network_helpers.h>
> > > +#include <sys/sysinfo.h>
> > > +
> > > +struct qspinlock { int val; };
> > > +typedef struct qspinlock arena_spinlock_t;
> > > +
> > > +struct arena_qnode {
> > > +       unsigned long next;
> > > +       int count;
> > > +       int locked;
> > > +};
> > > +
> > > +#include "arena_spin_lock.skel.h"
> > > +
> > > +static long cpu;
> > > +int *counter;
> > > +
> > > +static void *spin_lock_thread(void *arg)
> > > +{
> > > +       int err, prog_fd = *(u32 *)arg;
> > > +       LIBBPF_OPTS(bpf_test_run_opts, topts,
> > > +               .data_in = &pkt_v4,
> > > +               .data_size_in = sizeof(pkt_v4),
> > > +               .repeat = 1,
> > > +       );
> >
> > Why bother with 'tc' prog type?
> > Pick syscall type, and above will be shorter:
> > LIBBPF_OPTS(bpf_test_run_opts, opts);
> >
>
> Ack.

Sadly, syscall prog_test_run doesn't support 'repeat' field, so we'll
have to stick with tc.

>
> > > +       cpu_set_t cpuset;
> > > +
> > > +       CPU_ZERO(&cpuset);
> > > +       CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
> > > +       ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
> > > +
> > > +       while (*READ_ONCE(counter) <= 1000) {
> >
> > READ_ONCE(*counter) ?
> >
> > but why add this user->kernel switch overhead.
> > Use .repeat = 1000
> > one bpf_prog_test_run_opts()
> > and check at the end that *counter == 1000 ?
>
> Ok.
>

One of the reasons to do this was to give the other threads time to
catch up with the first thread as it starts storming through the
counter increments.
So in the case of a short critical section there is no contention at
all and most code paths don't get triggered.
But I'll use a pthread_barrier instead.
diff mbox series

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
new file mode 100644
index 000000000000..2cc078ed1ddb
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
@@ -0,0 +1,102 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <sys/sysinfo.h>
+
+struct qspinlock { int val; };
+typedef struct qspinlock arena_spinlock_t;
+
+struct arena_qnode {
+	unsigned long next;
+	int count;
+	int locked;
+};
+
+#include "arena_spin_lock.skel.h"
+
+static long cpu;
+int *counter;
+
+static void *spin_lock_thread(void *arg)
+{
+	int err, prog_fd = *(u32 *)arg;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
+	cpu_set_t cpuset;
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
+	ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
+
+	while (*READ_ONCE(counter) <= 1000) {
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		if (!ASSERT_OK(err, "test_run err"))
+			break;
+		if (!ASSERT_EQ((int)topts.retval, 0, "test_run retval"))
+			break;
+	}
+	pthread_exit(arg);
+}
+
+static void test_arena_spin_lock_size(int size)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	struct arena_spin_lock *skel;
+	pthread_t thread_id[16];
+	int prog_fd, i, err;
+	void *ret;
+
+	if (get_nprocs() < 2) {
+		test__skip();
+		return;
+	}
+
+	skel = arena_spin_lock__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
+		return;
+	if (skel->data->test_skip == 2) {
+		test__skip();
+		goto end;
+	}
+	counter = &skel->bss->counter;
+	skel->bss->cs_count = size;
+
+	prog_fd = bpf_program__fd(skel->progs.prog);
+	for (i = 0; i < 16; i++) {
+		err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+		if (!ASSERT_OK(err, "pthread_create"))
+			goto end;
+	}
+
+	for (i = 0; i < 16; i++) {
+		if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+			goto end;
+		if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+			goto end;
+	}
+end:
+	arena_spin_lock__destroy(skel);
+	return;
+}
+
+void test_arena_spin_lock(void)
+{
+	if (test__start_subtest("arena_spin_lock_1"))
+		test_arena_spin_lock_size(1);
+	cpu = 0;
+	if (test__start_subtest("arena_spin_lock_1000"))
+		test_arena_spin_lock_size(1000);
+	cpu = 0;
+	if (test__start_subtest("arena_spin_lock_10000"))
+		test_arena_spin_lock_size(10000);
+	cpu = 0;
+	if (test__start_subtest("arena_spin_lock_100000"))
+		test_arena_spin_lock_size(100000);
+	cpu = 0;
+	if (test__start_subtest("arena_spin_lock_500000"))
+		test_arena_spin_lock_size(500000);
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
new file mode 100644
index 000000000000..3e8ce807e028
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
@@ -0,0 +1,51 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_arena_spin_lock.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARENA);
+	__uint(map_flags, BPF_F_MMAPABLE);
+	__uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+int cs_count;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+arena_spinlock_t __arena lock;
+void *ptr;
+int test_skip = 1;
+#else
+int test_skip = 2;
+#endif
+
+int counter;
+
+SEC("tc")
+int prog(void *ctx)
+{
+	int ret = -2;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+	unsigned long flags;
+
+	ptr = &arena;
+	if ((ret = arena_spin_lock_irqsave(&lock, flags)))
+		return ret;
+	WRITE_ONCE(counter, READ_ONCE(counter) + 1);
+	bpf_repeat(cs_count);
+	ret = 0;
+	arena_spin_unlock_irqrestore(&lock, flags);
+#endif
+	return ret;
+}
+
+char _license[] SEC("license") = "GPL";