
[RFC,bpf-next,3/3] selftests/bpf: Add selftest for for_each_cpu

Message ID 20230801142912.55078-4-laoar.shao@gmail.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf: Add new bpf helper bpf_for_each_cpu

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ${{ matrix.test }} on ${{ matrix.arch }} with ${{ matrix.toolchain_full }}
bpf/vmtest-bpf-next-VM_Test-2 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-3 fail Logs for build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-4 fail Logs for build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-5 fail Logs for build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 fail Logs for build for x86_64 with llvm-16
bpf/vmtest-bpf-next-VM_Test-7 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-8 success Logs for veristat
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 9 this patch: 9
netdev/cc_maintainers warning 4 maintainers not CCed: yonghong.song@linux.dev mykolal@fb.com shuah@kernel.org linux-kselftest@vger.kernel.org
netdev/build_clang success Errors and warnings before: 9 this patch: 9
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 9 this patch: 9
netdev/checkpatch fail ERROR: that open brace { should be on the previous line WARNING: added, moved or deleted file(s), does MAINTAINERS need updating? WARNING: line length of 93 exceeds 80 columns WARNING: line length of 96 exceeds 80 columns WARNING: line length of 99 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Yafang Shao Aug. 1, 2023, 2:29 p.m. UTC
Add a selftest for the new bpf_for_each_cpu helper.

The result:
  $ tools/testing/selftests/bpf/test_progs --name=for_each_cpu
  #84/1    for_each_cpu/psi_system:OK
  #84/2    for_each_cpu/psi_cgroup:OK
  #84/3    for_each_cpu/invalid_cpumask:OK
  #84      for_each_cpu:OK
  Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
 .../selftests/bpf/prog_tests/for_each_cpu.c        | 136 +++++++++++++++++++++
 .../selftests/bpf/progs/test_for_each_cpu.c        |  63 ++++++++++
 2 files changed, 199 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/for_each_cpu.c
 create mode 100644 tools/testing/selftests/bpf/progs/test_for_each_cpu.c
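For context, the calling convention this selftest exercises looks roughly as follows. This is a minimal sketch distilled from the BPF program in this patch; the authoritative bpf_for_each_cpu definition and the bpf_cpu_mask_type values (CPU_MASK_POSSIBLE, CPU_MASK_PRESENT, CPU_MASK_ONLINE, CPU_MASK_TASK) come from the earlier patches in the series, which are not shown on this page:

  /* Sketch only -- see the earlier patches of this series for the real
   * bpf_for_each_cpu definition.
   */
  struct callback_ctx {
          __u32 nr_running;
  };

  /* Invoked once per selected CPU; @ptr points at that CPU's instance of
   * the per-CPU object passed to bpf_for_each_cpu().
   */
  static int callback(__u32 cpu, void *ctx, const void *ptr)
  {
          const struct psi_group_cpu *groupc = ptr;
          struct callback_ctx *data = ctx;
          unsigned int tasks[NR_PSI_TASK_COUNTS];

          bpf_probe_read_kernel(&tasks, sizeof(tasks), &groupc->tasks);
          data->nr_running += tasks[NR_RUNNING];
          return 0;
  }

  /* In the cgroup iterator program: walk the per-CPU pointer over the CPUs
   * selected by cpu_mask; judging by the invalid_cpumask subtest, pid is
   * only accepted together with CPU_MASK_TASK.
   */
  ret = bpf_for_each_cpu(callback, &data, pcpu_ptr, cpu_mask, pid);

The callback and the sleepable cgroup iterator program that drive it live in progs/test_for_each_cpu.c below; the user-space side that attaches the iterator and parses its seq_file output is in prog_tests/for_each_cpu.c.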

Patch

diff --git a/tools/testing/selftests/bpf/prog_tests/for_each_cpu.c b/tools/testing/selftests/bpf/prog_tests/for_each_cpu.c
new file mode 100644
index 0000000..b0eaaec
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/for_each_cpu.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include "cgroup_helpers.h"
+#include "test_for_each_cpu.skel.h"
+
+static void verify_percpu_psi_value(struct test_for_each_cpu *skel, int fd, __u32 running, int res)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	union bpf_iter_link_info linfo;
+	int len, iter_fd, result;
+	struct bpf_link *link;
+	static char buf[128];
+	__u32 nr_running;
+	size_t left;
+	char *p;
+
+	memset(&linfo, 0, sizeof(linfo));
+	linfo.cgroup.cgroup_fd = fd;
+	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(skel->progs.psi_cgroup, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		return;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "iter_fd"))
+		goto free_link;
+
+	memset(buf, 0, sizeof(buf));
+	left = ARRAY_SIZE(buf);
+	p = buf;
+	while ((len = read(iter_fd, p, left)) > 0) {
+		p += len;
+		left -= len;
+	}
+
+	ASSERT_EQ(sscanf(buf, "nr_running %u ret %d\n", &nr_running, &result), 2, "seq_format");
+	ASSERT_EQ(result, res, "for_each_cpu_result");
+	if (running)
+		ASSERT_GE(nr_running, running, "nr_running");
+	else
+		ASSERT_EQ(nr_running, running, "nr_running");
+
+	/* read() after iter finishes should be ok. */
+	if (len == 0)
+		ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");
+	close(iter_fd);
+free_link:
+	bpf_link__destroy(link);
+}
+
+static void test_root_cgroup(struct test_for_each_cpu *skel)
+{
+	int cgrp_fd, nr_cpus;
+
+	cgrp_fd = get_root_cgroup();
+	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
+		return;
+
+	skel->bss->cpu_mask = CPU_MASK_POSSIBLE;
+	skel->bss->pid = 0;
+	nr_cpus = bpf_num_possible_cpus();
+	/* At least current is running */
+	verify_percpu_psi_value(skel, cgrp_fd, 1, nr_cpus);
+	close(cgrp_fd);
+}
+
+static void test_child_cgroup(struct test_for_each_cpu *skel)
+{
+	int cgrp_fd, nr_cpus;
+
+	cgrp_fd = create_and_get_cgroup("for_each_cpu");
+	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
+		return;
+
+	skel->bss->cpu_mask = CPU_MASK_POSSIBLE;
+	skel->bss->pid = 0;
+	nr_cpus = bpf_num_possible_cpus();
+	/* No tasks in the cgroup */
+	verify_percpu_psi_value(skel, cgrp_fd, 0, nr_cpus);
+	close(cgrp_fd);
+	remove_cgroup("for_each_cpu");
+}
+
+static void verify_invalid_cpumask(struct test_for_each_cpu *skel, int fd, __u32 cpumask, __u32 pid)
+{
+	skel->bss->cpu_mask = cpumask;
+	skel->bss->pid = pid;
+	verify_percpu_psi_value(skel, fd, 0, -EINVAL);
+}
+
+static void test_invalid_cpumask(struct test_for_each_cpu *skel)
+{
+	int cgrp_fd;
+
+	cgrp_fd = create_and_get_cgroup("for_each_cpu");
+	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
+		return;
+
+	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_POSSIBLE, 1);
+	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_PRESENT, 1);
+	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_ONLINE, 1);
+	verify_invalid_cpumask(skel, cgrp_fd, CPU_MASK_TASK, 0);
+	verify_invalid_cpumask(skel, cgrp_fd, -1, 0);
+	verify_invalid_cpumask(skel, cgrp_fd, -1, 1);
+	close(cgrp_fd);
+	remove_cgroup("for_each_cpu");
+}
+
+void test_for_each_cpu(void)
+{
+	struct test_for_each_cpu *skel = NULL;
+
+	skel = test_for_each_cpu__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_for_each_cpu__open_and_load"))
+		return;
+
+	if (!ASSERT_OK(setup_cgroup_environment(), "setup_cgroup_environment"))
+		goto cleanup;
+
+	if (test__start_subtest("psi_system"))
+		test_root_cgroup(skel);
+	if (test__start_subtest("psi_cgroup"))
+		test_child_cgroup(skel);
+	if (test__start_subtest("invalid_cpumask"))
+		test_invalid_cpumask(skel);
+
+cleanup:
+	test_for_each_cpu__destroy(skel);
+	cleanup_cgroup_environment();
+}
diff --git a/tools/testing/selftests/bpf/progs/test_for_each_cpu.c b/tools/testing/selftests/bpf/progs/test_for_each_cpu.c
new file mode 100644
index 0000000..1554895
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_for_each_cpu.c
@@ -0,0 +1,63 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define __percpu __attribute__((btf_type_tag("percpu")))
+
+enum bpf_cpu_mask_type cpu_mask;
+__u32 pid;
+
+struct callback_ctx {
+	__u32 nr_running;
+	__u32 id;
+};
+
+static uint64_t cgroup_id(struct cgroup *cgrp)
+{
+	return cgrp->kn->id;
+}
+
+static int callback(__u32 cpu, void *ctx, const void *ptr)
+{
+	unsigned int tasks[NR_PSI_TASK_COUNTS];
+	const struct psi_group_cpu *groupc = ptr;
+	struct callback_ctx *data = ctx;
+
+	bpf_probe_read_kernel(&tasks, sizeof(tasks), &groupc->tasks);
+	data->nr_running += tasks[NR_RUNNING];
+	return 0;
+}
+
+SEC("iter.s/cgroup")
+int BPF_PROG(psi_cgroup, struct bpf_iter_meta *meta, struct cgroup *cgrp)
+{
+	struct seq_file *seq = (struct seq_file *)meta->seq;
+	struct psi_group_cpu __percpu *pcpu_ptr;
+	struct callback_ctx data;
+	struct psi_group *psi;
+	__u64 cg_id;
+	int ret;
+
+	cg_id = cgrp ? cgroup_id(cgrp) : 0;
+	if (!cg_id)
+		return 1;
+
+	psi = cgrp->psi;
+	if (!psi)
+		return 1;
+
+	pcpu_ptr = psi->pcpu;
+	if (!pcpu_ptr)
+		return 1;
+
+	data.nr_running = 0;
+	data.id = cg_id;
+	ret = bpf_for_each_cpu(callback, &data, pcpu_ptr, cpu_mask, pid);
+	BPF_SEQ_PRINTF(seq, "nr_running %u ret %d\n", data.nr_running, ret);
+
+	return ret ? 1 : 0;
+}
+
+char _license[] SEC("license") = "GPL";