
[bpf-next,v2,3/5] bpf: add dummy BPF STRUCT_OPS for test purpose

Message ID: 20211016124806.1547989-4-houtao1@huawei.com
State: Superseded
Delegated to: BPF
Series: introduce dummy BPF STRUCT_OPS

Checks

Context Check Description
bpf/vmtest-bpf-next-PR success PR summary
netdev/cover_letter success Series has a cover letter
netdev/fixes_present success Fixes tag not required for -next series
netdev/patch_count success
netdev/tree_selection success Clearly marked for bpf-next
netdev/subject_prefix success
netdev/cc_maintainers warning 5 maintainers not CCed: songliubraving@fb.com john.fastabend@gmail.com davem@davemloft.net kuba@kernel.org kpsingh@kernel.org
netdev/source_inline success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/module_param success Was 0 now: 0
netdev/build_32bit success Errors and warnings before: 11730 this patch: 11730
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/verify_fixes success No Fixes tag
netdev/checkpatch warning CHECK: Please use a blank line after function/struct/union/enum declarations; WARNING: added, moved or deleted file(s), does MAINTAINERS need updating?; WARNING: line length of 81 exceeds 80 columns
netdev/build_allmodconfig_warn success Errors and warnings before: 11361 this patch: 11361
netdev/header_inline success No static functions without inline keyword in header files
bpf/vmtest-bpf-next success VM_Test

Commit Message

Hou Tao Oct. 16, 2021, 12:48 p.m. UTC
Currently the testing of BPF STRUCT_OPS depends on the specific bpf
implementation of tcp_congestion_ops, but it cannot cover all basic
functionalities (e.g., return value handling), so introduce a dummy
BPF STRUCT_OPS for testing purposes.

Loading a bpf_dummy_ops implementation from userspace is prohibited,
and its only purpose is to run a BPF_PROG_TYPE_STRUCT_OPS program
through bpf(BPF_PROG_TEST_RUN). Programs for test_1() and test_2()
are currently supported. The following three cases are exercised in
bpf_dummy_struct_ops_test_run():

(1) test and check the value returned from the state arg in test_1(state)
The content of state is copied from a userspace pointer and copied back
after calling test_1(state). The user pointer is saved in a u64 array
and the array address is passed through ctx_in.

(2) test and check the return value of test_1(NULL)
This simulates the case in which an invalid input argument is passed in.

(3) test passing multiple arguments in test_2(state, ...)
Five arguments are passed through ctx_in in the form of a u64 array. The
first element of the array is the userspace pointer to state and the
other four arguments follow.
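
For illustration, a userspace caller could drive test_1() roughly as
follows (a sketch assuming libbpf's bpf_prog_test_run_opts() helper;
prog_fd and the userspace mirror of the state struct are illustrative,
and error handling is omitted):

	/* userspace mirror of the kernel's struct bpf_dummy_ops_state */
	struct bpf_dummy_ops_state { int val; } state = {};
	/* ctx_in carries one u64 per argument; test_1() takes a single
	 * argument: the user pointer to state (or 0 to exercise NULL)
	 */
	__u64 args[1] = { (__u64)(unsigned long)&state };

	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
	);

	err = bpf_prog_test_run_opts(prog_fd, &opts);
	/* on success, opts.retval holds test_1()'s return value and
	 * state.val reflects any write done by the BPF program
	 */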

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 include/linux/bpf.h               |  14 ++
 kernel/bpf/bpf_struct_ops_types.h |   1 +
 net/bpf/Makefile                  |   3 +
 net/bpf/bpf_dummy_struct_ops.c    | 206 ++++++++++++++++++++++++++++++
 4 files changed, 224 insertions(+)
 create mode 100644 net/bpf/bpf_dummy_struct_ops.c

Comments

Martin KaFai Lau Oct. 20, 2021, 1:22 a.m. UTC | #1
On Sat, Oct 16, 2021 at 08:48:04PM +0800, Hou Tao wrote:
> +static struct bpf_dummy_ops_test_args *
> +dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
> +{
> +	__u32 size_in;
> +	struct bpf_dummy_ops_test_args *args;
> +	void __user *ctx_in;
> +	void __user *u_state;
> +
> +	if (!nr || nr > MAX_BPF_FUNC_ARGS)
> +		return ERR_PTR(-EINVAL);
> +
> +	size_in = kattr->test.ctx_size_in;
> +	if (size_in != sizeof(u64) * nr)
> +		return ERR_PTR(-EINVAL);
> +
> +	args = kzalloc(sizeof(*args), GFP_KERNEL);
> +	if (!args)
> +		return ERR_PTR(-ENOMEM);
> +
> +	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
> +	if (copy_from_user(args->args, ctx_in, size_in))
args is leaked.
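For illustration, one way to plug the leak (sketch only):

	if (copy_from_user(args->args, ctx_in, size_in)) {
		kfree(args);
		return ERR_PTR(-EFAULT);
	}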

> +		return ERR_PTR(-EFAULT);
> +
> +	u_state = u64_to_user_ptr(args->args[0]);
> +	if (!u_state)
> +		return args;
> +
> +	if (copy_from_user(&args->state, u_state, sizeof(args->state))) {
> +		kfree(args);
> +		return ERR_PTR(-EFAULT);
> +	}
> +
> +	return args;
> +}

[ ... ]

> +int bpf_dummy_struct_ops_test_run(struct bpf_prog *prog,
> +				  const union bpf_attr *kattr,
> +				  union bpf_attr __user *uattr)
> +{
> +	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
> +	const struct btf_type *func_proto = prog->aux->attach_func_proto;
> +	struct bpf_dummy_ops_test_args *args = NULL;
> +	struct bpf_tramp_progs *tprogs = NULL;
args = NULL and tprogs = NULL are not needed.

> +	void *image = NULL;
> +	unsigned int op_idx;
> +	int err;
> +	int prog_ret;
> +
> +	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
> +	if (IS_ERR(args))
> +		return PTR_ERR(args);
> +
> +	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
> +	if (!tprogs) {
> +		err = -ENOMEM;
> +		goto out;
> +	}
> +
> +	image = bpf_jit_alloc_exec(PAGE_SIZE);
> +	if (!image) {
> +		err = -ENOMEM;
> +		goto out;
> +	}
> +	set_vm_flush_reset_perms(image);
> +
> +	op_idx = prog->expected_attach_type;
> +	err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
> +						&st_ops->func_models[op_idx],
> +						image, image + PAGE_SIZE);
> +	if (err < 0)
> +		goto out;
> +
> +	set_memory_ro((long)image, 1);
> +	set_memory_x((long)image, 1);
> +	prog_ret = dummy_ops_call_op(image, args);
> +
> +	err = dummy_ops_copy_args(args);
> +	if (err)
> +		goto out;
> +	if (put_user(prog_ret, &uattr->test.retval))
> +		err = -EFAULT;
> +out:
> +	kfree(args);
> +	bpf_jit_free_exec(image);
> +	kfree(tprogs);
> +	return err;
> +}
> +
> +static int bpf_dummy_init(struct btf *btf)
> +{
> +	s32 type_id;
> +
> +	type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
> +					BTF_KIND_STRUCT);
> +	if (type_id < 0)
> +		return -EINVAL;
> +
> +	dummy_ops_state = btf_type_by_id(btf, type_id);
Probably just do btf_find_by_name_kind("bpf_dummy_ops_state")
in bpf_dummy_ops_btf_struct_access() during each test. There is
no need to optimize and cache it for a test-only feature.
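For illustration, the per-test lookup could be (a sketch, untested):

	const struct btf_type *state;
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	state = btf_type_by_id(btf, type_id);
	/* then compare t against state instead of a cached pointer */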

> +
> +	return 0;
> +}
> +
> +static bool bpf_dummy_ops_is_valid_access(int off, int size,
> +					  enum bpf_access_type type,
> +					  const struct bpf_prog *prog,
> +					  struct bpf_insn_access_aux *info)
> +{
> +	return bpf_check_btf_func_ctx_access(off, size, type, prog, info);
> +}
> +
> +static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
> +					   const struct btf *btf,
> +					   const struct btf_type *t, int off,
> +					   int size, enum bpf_access_type atype,
> +					   u32 *next_btf_id)
> +{
> +	int err;
> +
> +	if (atype != BPF_READ && t != dummy_ops_state) {
I think this can be further simplified.  The only struct that
the bpf_prog can access is bpf_dummy_ops_state, so the
"atype != BPF_READ" test can be removed.  The log message
then needs to be adjusted.
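For illustration, the simplified check might then be (sketch only):

	/* bpf_dummy_ops_state is the only struct the prog may access */
	if (t != dummy_ops_state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}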

Others lgtm.

> +		bpf_log(log, "only write to bpf_dummy_ops_state is supported\n");
> +		return -EACCES;
> +	}
> +
> +	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
> +	if (err < 0)
> +		return err;
> +
> +	return atype == BPF_READ ? err : NOT_INIT;
> +}

Patch

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index b503306da2ab..edcd84077bf1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1016,6 +1016,20 @@  static inline void bpf_module_put(const void *data, struct module *owner)
 	else
 		module_put(owner);
 }
+
+/* Define it here to avoid the use of forward declaration */
+struct bpf_dummy_ops_state {
+	int val;
+};
+
+struct bpf_dummy_ops {
+	int (*test_1)(struct bpf_dummy_ops_state *cb);
+	int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
+		      char a3, unsigned long a4);
+};
+int bpf_dummy_struct_ops_test_run(struct bpf_prog *prog,
+				  const union bpf_attr *kattr,
+				  union bpf_attr __user *uattr);
 #else
 static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
 {
diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
index 066d83ea1c99..74733ee012e4 100644
--- a/kernel/bpf/bpf_struct_ops_types.h
+++ b/kernel/bpf/bpf_struct_ops_types.h
@@ -2,6 +2,7 @@ 
 /* internal file - do not include directly */
 
 #ifdef CONFIG_BPF_JIT
+BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
 #ifdef CONFIG_INET
 #include <net/tcp.h>
 BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
diff --git a/net/bpf/Makefile b/net/bpf/Makefile
index 1c0a98d8c28f..1ebe270bde23 100644
--- a/net/bpf/Makefile
+++ b/net/bpf/Makefile
@@ -1,2 +1,5 @@ 
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_BPF_SYSCALL)	:= test_run.o
+ifeq ($(CONFIG_BPF_JIT),y)
+obj-$(CONFIG_BPF_SYSCALL)	+= bpf_dummy_struct_ops.o
+endif
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
new file mode 100644
index 000000000000..478d7b656ab3
--- /dev/null
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -0,0 +1,206 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021. Huawei Technologies Co., Ltd
+ */
+#include <linux/kernel.h>
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+
+extern struct bpf_struct_ops bpf_bpf_dummy_ops;
+
+/* A common type for test_N with return value in bpf_dummy_ops */
+typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
+
+struct bpf_dummy_ops_test_args {
+	u64 args[MAX_BPF_FUNC_ARGS];
+	struct bpf_dummy_ops_state state;
+};
+
+static const struct btf_type *dummy_ops_state;
+
+static struct bpf_dummy_ops_test_args *
+dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
+{
+	__u32 size_in;
+	struct bpf_dummy_ops_test_args *args;
+	void __user *ctx_in;
+	void __user *u_state;
+
+	if (!nr || nr > MAX_BPF_FUNC_ARGS)
+		return ERR_PTR(-EINVAL);
+
+	size_in = kattr->test.ctx_size_in;
+	if (size_in != sizeof(u64) * nr)
+		return ERR_PTR(-EINVAL);
+
+	args = kzalloc(sizeof(*args), GFP_KERNEL);
+	if (!args)
+		return ERR_PTR(-ENOMEM);
+
+	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
+	if (copy_from_user(args->args, ctx_in, size_in))
+		return ERR_PTR(-EFAULT);
+
+	u_state = u64_to_user_ptr(args->args[0]);
+	if (!u_state)
+		return args;
+
+	if (copy_from_user(&args->state, u_state, sizeof(args->state))) {
+		kfree(args);
+		return ERR_PTR(-EFAULT);
+	}
+
+	return args;
+}
+
+static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
+{
+	void __user *u_state;
+
+	u_state = u64_to_user_ptr(args->args[0]);
+	if (!u_state)
+		return 0;
+
+	if (copy_to_user(u_state, &args->state, sizeof(args->state)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
+{
+	dummy_ops_test_ret_fn test = (void *)image;
+	struct bpf_dummy_ops_state *state = NULL;
+
+	/* state needs to be NULL if args[0] is 0 */
+	if (args->args[0])
+		state = &args->state;
+	return test(state, args->args[1], args->args[2],
+		    args->args[3], args->args[4]);
+}
+
+int bpf_dummy_struct_ops_test_run(struct bpf_prog *prog,
+				  const union bpf_attr *kattr,
+				  union bpf_attr __user *uattr)
+{
+	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
+	const struct btf_type *func_proto = prog->aux->attach_func_proto;
+	struct bpf_dummy_ops_test_args *args = NULL;
+	struct bpf_tramp_progs *tprogs = NULL;
+	void *image = NULL;
+	unsigned int op_idx;
+	int err;
+	int prog_ret;
+
+	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
+	if (IS_ERR(args))
+		return PTR_ERR(args);
+
+	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
+	if (!tprogs) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	image = bpf_jit_alloc_exec(PAGE_SIZE);
+	if (!image) {
+		err = -ENOMEM;
+		goto out;
+	}
+	set_vm_flush_reset_perms(image);
+
+	op_idx = prog->expected_attach_type;
+	err = bpf_struct_ops_prepare_trampoline(tprogs, prog,
+						&st_ops->func_models[op_idx],
+						image, image + PAGE_SIZE);
+	if (err < 0)
+		goto out;
+
+	set_memory_ro((long)image, 1);
+	set_memory_x((long)image, 1);
+	prog_ret = dummy_ops_call_op(image, args);
+
+	err = dummy_ops_copy_args(args);
+	if (err)
+		goto out;
+	if (put_user(prog_ret, &uattr->test.retval))
+		err = -EFAULT;
+out:
+	kfree(args);
+	bpf_jit_free_exec(image);
+	kfree(tprogs);
+	return err;
+}
+
+static int bpf_dummy_init(struct btf *btf)
+{
+	s32 type_id;
+
+	type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
+					BTF_KIND_STRUCT);
+	if (type_id < 0)
+		return -EINVAL;
+
+	dummy_ops_state = btf_type_by_id(btf, type_id);
+
+	return 0;
+}
+
+static bool bpf_dummy_ops_is_valid_access(int off, int size,
+					  enum bpf_access_type type,
+					  const struct bpf_prog *prog,
+					  struct bpf_insn_access_aux *info)
+{
+	return bpf_check_btf_func_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
+					   const struct btf *btf,
+					   const struct btf_type *t, int off,
+					   int size, enum bpf_access_type atype,
+					   u32 *next_btf_id)
+{
+	int err;
+
+	if (atype != BPF_READ && t != dummy_ops_state) {
+		bpf_log(log, "only write to bpf_dummy_ops_state is supported\n");
+		return -EACCES;
+	}
+
+	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
+	if (err < 0)
+		return err;
+
+	return atype == BPF_READ ? err : NOT_INIT;
+}
+
+static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
+	.is_valid_access = bpf_dummy_ops_is_valid_access,
+	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
+};
+
+static int bpf_dummy_init_member(const struct btf_type *t,
+				 const struct btf_member *member,
+				 void *kdata, const void *udata)
+{
+	return -EOPNOTSUPP;
+}
+
+static int bpf_dummy_reg(void *kdata)
+{
+	return -EOPNOTSUPP;
+}
+
+static void bpf_dummy_unreg(void *kdata)
+{
+}
+
+struct bpf_struct_ops bpf_bpf_dummy_ops = {
+	.verifier_ops = &bpf_dummy_verifier_ops,
+	.init = bpf_dummy_init,
+	.init_member = bpf_dummy_init_member,
+	.reg = bpf_dummy_reg,
+	.unreg = bpf_dummy_unreg,
+	.name = "bpf_dummy_ops",
+};