
[RFC,bpf-next,1/3] bpf: add dummy BPF STRUCT_OPS for test purpose

Message ID 20210915033753.1201597-2-houtao1@huawei.com (mailing list archive)
State Superseded
Delegated to: BPF
Series: introduce dummy BPF STRUCT_OPS

Checks

Context Check Description
netdev/apply fail Patch does not apply to bpf-next
netdev/tree_selection success Clearly marked for bpf-next
bpf/vmtest-bpf-next success VM_Test
bpf/vmtest-bpf-next-PR success PR summary

Commit Message

Hou Tao Sept. 15, 2021, 3:37 a.m. UTC
Currently the test of BPF STRUCT_OPS depends on the specific bpf
implementation of tcp_congestion_ops, and it cannot cover all
basic functionalities (e.g., return value handling), so introduce
a dummy BPF STRUCT_OPS for testing purposes.

The dummy BPF STRUCT_OPS may not be needed in a release kernel, so
add a kconfig option BPF_DUMMY_STRUCT_OPS to enable it separately.

Signed-off-by: Hou Tao <houtao1@huawei.com>
---
 include/linux/bpf_dummy_ops.h     |  28 +++++
 kernel/bpf/Kconfig                |   7 ++
 kernel/bpf/Makefile               |   2 +
 kernel/bpf/bpf_dummy_struct_ops.c | 173 ++++++++++++++++++++++++++++++
 kernel/bpf/bpf_struct_ops_types.h |   4 +
 5 files changed, 214 insertions(+)
 create mode 100644 include/linux/bpf_dummy_ops.h
 create mode 100644 kernel/bpf/bpf_dummy_struct_ops.c

Comments

Martin KaFai Lau Sept. 15, 2021, 8:58 p.m. UTC | #1
On Wed, Sep 15, 2021 at 11:37:51AM +0800, Hou Tao wrote:
> Currently the test of BPF STRUCT_OPS depends on the specific bpf
> implementation of tcp_congestion_ops, and it cannot cover all
> basic functionalities (e.g., return value handling), so introduce
> a dummy BPF STRUCT_OPS for testing purposes.
> 
> The dummy BPF STRUCT_OPS may not be needed in a release kernel, so
> add a kconfig option BPF_DUMMY_STRUCT_OPS to enable it separately.
Thanks for the patches!

> diff --git a/include/linux/bpf_dummy_ops.h b/include/linux/bpf_dummy_ops.h
> new file mode 100644
> index 000000000000..b2aad3e6e2fe
> --- /dev/null
> +++ b/include/linux/bpf_dummy_ops.h
> @@ -0,0 +1,28 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2021. Huawei Technologies Co., Ltd
> + */
> +#ifndef _BPF_DUMMY_OPS_H
> +#define _BPF_DUMMY_OPS_H
> +
> +#ifdef CONFIG_BPF_DUMMY_STRUCT_OPS
> +#include <linux/module.h>
> +
> +struct bpf_dummy_ops_state {
> +	int val;
> +};
> +
> +struct bpf_dummy_ops {
> +	int (*init)(struct bpf_dummy_ops_state *state);
> +	struct module *owner;
> +};
> +
> +extern struct bpf_dummy_ops *bpf_get_dummy_ops(void);
> +extern void bpf_put_dummy_ops(struct bpf_dummy_ops *ops);
> +#else
> +struct bpf_dummy_ops {};
This ';' looks different ;)

It probably has dodged the compiler due to the kconfig.
I think CONFIG_BPF_DUMMY_STRUCT_OPS and the bpf_(get|put)_dummy_ops
are not needed.  More on this later.

> diff --git a/kernel/bpf/bpf_dummy_struct_ops.c b/kernel/bpf/bpf_dummy_struct_ops.c
> new file mode 100644
> index 000000000000..f76c4a3733f0
> --- /dev/null
> +++ b/kernel/bpf/bpf_dummy_struct_ops.c
> @@ -0,0 +1,173 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2021. Huawei Technologies Co., Ltd
> + */
> +#include <linux/kernel.h>
> +#include <linux/spinlock.h>
> +#include <linux/bpf_verifier.h>
> +#include <linux/bpf.h>
> +#include <linux/btf.h>
> +#include <linux/bpf_dummy_ops.h>
> +
> +static struct bpf_dummy_ops *bpf_dummy_ops_singletion;
> +static DEFINE_SPINLOCK(bpf_dummy_ops_lock);
> +
> +static const struct btf_type *dummy_ops_state;
> +
> +struct bpf_dummy_ops *bpf_get_dummy_ops(void)
> +{
> +	struct bpf_dummy_ops *ops;
> +
> +	spin_lock(&bpf_dummy_ops_lock);
> +	ops = bpf_dummy_ops_singletion;
> +	if (ops && !bpf_try_module_get(ops, ops->owner))
> +		ops = NULL;
> +	spin_unlock(&bpf_dummy_ops_lock);
> +
> +	return ops ? ops : ERR_PTR(-ENXIO);
> +}
> +EXPORT_SYMBOL_GPL(bpf_get_dummy_ops);
> +
> +void bpf_put_dummy_ops(struct bpf_dummy_ops *ops)
> +{
> +	bpf_module_put(ops, ops->owner);
> +}
> +EXPORT_SYMBOL_GPL(bpf_put_dummy_ops);

[ ... ]

> +static int bpf_dummy_reg(void *kdata)
> +{
> +	struct bpf_dummy_ops *ops = kdata;
> +	int err = 0;
> +
> +	spin_lock(&bpf_dummy_ops_lock);
> +	if (!bpf_dummy_ops_singletion)
> +		bpf_dummy_ops_singletion = ops;
> +	else
> +		err = -EEXIST;
> +	spin_unlock(&bpf_dummy_ops_lock);
> +
> +	return err;
> +}
I don't think we are interested in testing registering/unregistering
a struct_ops.  This common infra logic should have already
been covered by bpf_tcp_ca.  Let's see if it can be avoided
such that the above singleton instance and EXPORT_SYMBOL_GPL
can also be removed.

It can reuse bpf_prog_test_run(), which can run a particular
bpf prog.  That allows a flexible way to select which prog
to call, instead of creating a file and then triggering an individual
prog by writing a name string into this new file.

For bpf_prog_test_run(), it needs a ".test_run" implementation in
"const struct bpf_prog_ops bpf_struct_ops_prog_ops".
This to-be-implemented ".test_run" can check prog->aux->attach_btf_id
to ensure it is the bpf_dummy_ops.  The prog->expected_attach_type can
tell which "func" ptr within bpf_dummy_ops is being implemented, and then
".test_run" will know how to call it.  The extra thing for the struct_ops's
".test_run" is to first call arch_prepare_bpf_trampoline() to prepare the
trampoline before calling into the bpf prog.
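
To make that concrete, a rough and untested sketch of the flow could look
like the following (bpf_struct_ops_find() is only used here to illustrate
the attach_btf_id check; the ctx setup and the trampoline details are left
as comments):

static int bpf_struct_ops_test_run(struct bpf_prog *prog,
				   const union bpf_attr *kattr,
				   union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops;

	/* only progs attached to bpf_dummy_ops are expected here */
	st_ops = bpf_struct_ops_find(prog->aux->attach_btf_id);
	if (!st_ops || strcmp(st_ops->name, "bpf_dummy_ops"))
		return -EOPNOTSUPP;

	/* 1. prog->expected_attach_type identifies which member of
	 *    bpf_dummy_ops this prog implements,
	 * 2. build that member's arguments (the ctx) from kattr,
	 * 3. arch_prepare_bpf_trampoline() for this single prog,
	 * 4. call through the trampoline and report the prog's return
	 *    value back to user space via uattr (test.retval).
	 */
	return -EOPNOTSUPP;	/* steps 1-4 omitted in this sketch */
}

and wire it up as the ".test_run" of bpf_struct_ops_prog_ops:

const struct bpf_prog_ops bpf_struct_ops_prog_ops = {
	.test_run = bpf_struct_ops_test_run,
};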

You can take a look at the other ".test_run" implementations,
e.g. bpf_prog_test_run_skb() and bpf_prog_test_run_tracing().

test_skb_pkt_end.c and fentry_test.c (likely others also) can be
used as references for the prog_tests/ side.  The dummy_ops test in
prog_tests/ does not need to call bpf_map__attach_struct_ops() since
there is no need to reg().  Instead, directly call bpf_prog_test_run() to
exercise each prog in bpf_dummy_ops.skel.h.
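
A prog_tests/ sketch could be as small as this (only a sketch; the
skeleton and prog names below are made up and may differ in the real
selftest):

#include <test_progs.h>
#include "bpf_dummy_ops.skel.h"

void test_dummy_ops(void)
{
	struct bpf_dummy_ops *skel;
	__u32 duration = 0, retval = 0;
	int err;

	skel = bpf_dummy_ops__open_and_load();
	if (CHECK(!skel, "open_and_load", "failed to load skeleton\n"))
		return;

	/* no bpf_map__attach_struct_ops(), i.e. no reg(); run the prog
	 * implementing bpf_dummy_ops::init directly instead.
	 */
	err = bpf_prog_test_run(bpf_program__fd(skel->progs.test_init),
				1, NULL, 0, NULL, NULL, &retval, &duration);
	CHECK(err, "test_run", "err %d\n", err);
	CHECK(retval != 0, "retval", "got %u\n", retval);

	bpf_dummy_ops__destroy(skel);
}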

bpf_dummy_init_member() should return -ENOTSUPP.
bpf_dummy_reg() and bpf_dummy_unreg() should then be never called.
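
i.e. something along these lines (just a sketch of the suggestion above):

static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	/* bpf_dummy_ops is only exercised through ".test_run", so
	 * creating a struct_ops map (and hence reg()/unreg()) is
	 * not supported.
	 */
	return -ENOTSUPP;
}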

bpf_dummy_struct_ops.c should be moved into net/bpf/.
No need to have CONFIG_BPF_DUMMY_STRUCT_OPS.  In the future, a generic one
could be created for the test_run related code, if there is a need.

> +
> +static void bpf_dummy_unreg(void *kdata)
> +{
> +	struct bpf_dummy_ops *ops = kdata;
> +
> +	spin_lock(&bpf_dummy_ops_lock);
> +	if (bpf_dummy_ops_singletion == ops)
> +		bpf_dummy_ops_singletion = NULL;
> +	else
> +		WARN_ON(1);
> +	spin_unlock(&bpf_dummy_ops_lock);
> +}
> +
> +extern struct bpf_struct_ops bpf_bpf_dummy_ops;
> +
> +struct bpf_struct_ops bpf_bpf_dummy_ops = {
> +	.verifier_ops = &bpf_dummy_verifier_ops,
> +	.init = bpf_dummy_init,
> +	.init_member = bpf_dummy_init_member,
> +	.check_member = bpf_dummy_check_member,
> +	.reg = bpf_dummy_reg,
> +	.unreg = bpf_dummy_unreg,
> +	.name = "bpf_dummy_ops",
> +};
Hou Tao Sept. 18, 2021, 2:03 a.m. UTC | #2
Hi,

On 9/16/2021 4:58 AM, Martin KaFai Lau wrote:
> On Wed, Sep 15, 2021 at 11:37:51AM +0800, Hou Tao wrote:
>> Currently the test of BPF STRUCT_OPS depends on the specific bpf
>> implementation of tcp_congestion_ops, and it cannot cover all
>> basic functionalities (e.g., return value handling), so introduce
>> a dummy BPF STRUCT_OPS for testing purposes.
>>
>> The dummy BPF STRUCT_OPS may not be needed in a release kernel, so
>> add a kconfig option BPF_DUMMY_STRUCT_OPS to enable it separately.
> Thanks for the patches!
>
>> diff --git a/include/linux/bpf_dummy_ops.h b/include/linux/bpf_dummy_ops.h
>> new file mode 100644
>> index 000000000000..b2aad3e6e2fe
>> --- /dev/null
>> +++ b/include/linux/bpf_dummy_ops.h
>> @@ -0,0 +1,28 @@
>> +/* SPDX-License-Identifier: GPL-2.0-only */
>> +/*
>> + * Copyright (C) 2021. Huawei Technologies Co., Ltd
>> + */
>> +#ifndef _BPF_DUMMY_OPS_H
>> +#define _BPF_DUMMY_OPS_H
>> +
>> +#ifdef CONFIG_BPF_DUMMY_STRUCT_OPS
>> +#include <linux/module.h>
>> +
>> +struct bpf_dummy_ops_state {
>> +	int val;
>> +};
>> +
>> +struct bpf_dummy_ops {
>> +	int (*init)(struct bpf_dummy_ops_state *state);
>> +	struct module *owner;
>> +};
>> +
>> +extern struct bpf_dummy_ops *bpf_get_dummy_ops(void);
>> +extern void bpf_put_dummy_ops(struct bpf_dummy_ops *ops);
>> +#else
>> +struct bpf_dummy_ops {};
> This ';' looks different ;)
>
> It probably has dodged the compiler due to the kconfig.
> I think CONFIG_BPF_DUMMY_STRUCT_OPS and the bpf_(get|put)_dummy_ops
> are not needed.  More on this later.
>
>> diff --git a/kernel/bpf/bpf_dummy_struct_ops.c b/kernel/bpf/bpf_dummy_struct_ops.c
>> new file mode 100644
>> index 000000000000..f76c4a3733f0
>> --- /dev/null
>> +++ b/kernel/bpf/bpf_dummy_struct_ops.c
>> @@ -0,0 +1,173 @@
>> +// SPDX-License-Identifier: GPL-2.0
>> +/*
>> + * Copyright (C) 2021. Huawei Technologies Co., Ltd
>> + */
>> +#include <linux/kernel.h>
>> +#include <linux/spinlock.h>
>> +#include <linux/bpf_verifier.h>
>> +#include <linux/bpf.h>
>> +#include <linux/btf.h>
>> +#include <linux/bpf_dummy_ops.h>
>> +
>> +static struct bpf_dummy_ops *bpf_dummy_ops_singletion;
>> +static DEFINE_SPINLOCK(bpf_dummy_ops_lock);
>> +
>> +static const struct btf_type *dummy_ops_state;
>> +
>> +struct bpf_dummy_ops *bpf_get_dummy_ops(void)
>> +{
>> +	struct bpf_dummy_ops *ops;
>> +
>> +	spin_lock(&bpf_dummy_ops_lock);
>> +	ops = bpf_dummy_ops_singletion;
>> +	if (ops && !bpf_try_module_get(ops, ops->owner))
>> +		ops = NULL;
>> +	spin_unlock(&bpf_dummy_ops_lock);
>> +
>> +	return ops ? ops : ERR_PTR(-ENXIO);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_get_dummy_ops);
>> +
>> +void bpf_put_dummy_ops(struct bpf_dummy_ops *ops)
>> +{
>> +	bpf_module_put(ops, ops->owner);
>> +}
>> +EXPORT_SYMBOL_GPL(bpf_put_dummy_ops);
> [ ... ]
>
>> +static int bpf_dummy_reg(void *kdata)
>> +{
>> +	struct bpf_dummy_ops *ops = kdata;
>> +	int err = 0;
>> +
>> +	spin_lock(&bpf_dummy_ops_lock);
>> +	if (!bpf_dummy_ops_singletion)
>> +		bpf_dummy_ops_singletion = ops;
>> +	else
>> +		err = -EEXIST;
>> +	spin_unlock(&bpf_dummy_ops_lock);
>> +
>> +	return err;
>> +}
> I don't think we are interested in testing registering/unregistering
> a struct_ops.  This common infra logic should have already
> been covered by bpf_tcp_ca.  Let's see if it can be avoided
> such that the above singleton instance and EXPORT_SYMBOL_GPL
> can also be removed.
>
> It can reuse bpf_prog_test_run(), which can run a particular
> bpf prog.  That allows a flexible way to select which prog
> to call, instead of creating a file and then triggering an individual
> prog by writing a name string into this new file.
>
> For bpf_prog_test_run(), it needs a ".test_run" implementation in
> "const struct bpf_prog_ops bpf_struct_ops_prog_ops".
> This to-be-implemented ".test_run" can check prog->aux->attach_btf_id
> to ensure it is the bpf_dummy_ops.  The prog->expected_attach_type can
> tell which "func" ptr within bpf_dummy_ops is being implemented, and then
> ".test_run" will know how to call it.  The extra thing for the struct_ops's
> ".test_run" is to first call arch_prepare_bpf_trampoline() to prepare the
> trampoline before calling into the bpf prog.
>
> You can take a look at the other ".test_run" implementations,
> e.g. bpf_prog_test_run_skb() and bpf_prog_test_run_tracing().
>
> test_skb_pkt_end.c and fentry_test.c (likely others also) can be
> used as references for the prog_tests/ side.  The dummy_ops test in
> prog_tests/ does not need to call bpf_map__attach_struct_ops() since
> there is no need to reg().  Instead, directly call bpf_prog_test_run() to
> exercise each prog in bpf_dummy_ops.skel.h.
>
> bpf_dummy_init_member() should return -ENOTSUPP.
> bpf_dummy_reg() and bpf_dummy_unreg() should then be never called.
>
> bpf_dummy_struct_ops.c should be moved into net/bpf/.
> No need to have CONFIG_BPF_DUMMY_STRUCT_OPS.  In the future, a generic one
> could be created for the test_run related code, if there is a need.
Will do and thanks for your suggestions.
>> +
>> +static void bpf_dummy_unreg(void *kdata)
>> +{
>> +	struct bpf_dummy_ops *ops = kdata;
>> +
>> +	spin_lock(&bpf_dummy_ops_lock);
>> +	if (bpf_dummy_ops_singletion == ops)
>> +		bpf_dummy_ops_singletion = NULL;
>> +	else
>> +		WARN_ON(1);
>> +	spin_unlock(&bpf_dummy_ops_lock);
>> +}
>> +
>> +extern struct bpf_struct_ops bpf_bpf_dummy_ops;
>> +
>> +struct bpf_struct_ops bpf_bpf_dummy_ops = {
>> +	.verifier_ops = &bpf_dummy_verifier_ops,
>> +	.init = bpf_dummy_init,
>> +	.init_member = bpf_dummy_init_member,
>> +	.check_member = bpf_dummy_check_member,
>> +	.reg = bpf_dummy_reg,
>> +	.unreg = bpf_dummy_unreg,
>> +	.name = "bpf_dummy_ops",
>> +};
> .

Patch

diff --git a/include/linux/bpf_dummy_ops.h b/include/linux/bpf_dummy_ops.h
new file mode 100644
index 000000000000..b2aad3e6e2fe
--- /dev/null
+++ b/include/linux/bpf_dummy_ops.h
@@ -0,0 +1,28 @@ 
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021. Huawei Technologies Co., Ltd
+ */
+#ifndef _BPF_DUMMY_OPS_H
+#define _BPF_DUMMY_OPS_H
+
+#ifdef CONFIG_BPF_DUMMY_STRUCT_OPS
+#include <linux/module.h>
+
+struct bpf_dummy_ops_state {
+	int val;
+};
+
+struct bpf_dummy_ops {
+	int (*init)(struct bpf_dummy_ops_state *state);
+	struct module *owner;
+};
+
+extern struct bpf_dummy_ops *bpf_get_dummy_ops(void);
+extern void bpf_put_dummy_ops(struct bpf_dummy_ops *ops);
+#else
+struct bpf_dummy_ops {};
+static inline struct bpf_dummy_ops *bpf_get_dummy_ops(void) { return NULL; }
+static inline void bpf_put_dummy_ops(struct bpf_dummy_ops *ops) {}
+#endif
+
+#endif
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
index a82d6de86522..4a11eca42791 100644
--- a/kernel/bpf/Kconfig
+++ b/kernel/bpf/Kconfig
@@ -86,4 +86,11 @@  config BPF_LSM
 
 	  If you are unsure how to answer this question, answer N.
 
+config BPF_DUMMY_STRUCT_OPS
+	bool "Enable dummy struct ops"
+	depends on BPF_SYSCALL && BPF_JIT
+	help
+	  Enables dummy struct ops to test the basic functionalities of
+	  BPF STRUCT_OPS.
+
 endmenu # "BPF subsystem"
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 7f33098ca63f..17e2bb59cceb 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -33,6 +33,8 @@  obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
 endif
 ifeq ($(CONFIG_BPF_JIT),y)
 obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
+obj-$(CONFIG_BPF_SYSCALL) += bpf_dummy_struct_ops.o
 obj-${CONFIG_BPF_LSM} += bpf_lsm.o
 endif
+obj-$(CONFIG_BPF_DUMMY_STRUCT_OPS) += bpf_dummy_struct_ops.o
 obj-$(CONFIG_BPF_PRELOAD) += preload/
diff --git a/kernel/bpf/bpf_dummy_struct_ops.c b/kernel/bpf/bpf_dummy_struct_ops.c
new file mode 100644
index 000000000000..f76c4a3733f0
--- /dev/null
+++ b/kernel/bpf/bpf_dummy_struct_ops.c
@@ -0,0 +1,173 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021. Huawei Technologies Co., Ltd
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/bpf_dummy_ops.h>
+
+static struct bpf_dummy_ops *bpf_dummy_ops_singletion;
+static DEFINE_SPINLOCK(bpf_dummy_ops_lock);
+
+static const struct btf_type *dummy_ops_state;
+
+struct bpf_dummy_ops *bpf_get_dummy_ops(void)
+{
+	struct bpf_dummy_ops *ops;
+
+	spin_lock(&bpf_dummy_ops_lock);
+	ops = bpf_dummy_ops_singletion;
+	if (ops && !bpf_try_module_get(ops, ops->owner))
+		ops = NULL;
+	spin_unlock(&bpf_dummy_ops_lock);
+
+	return ops ? ops : ERR_PTR(-ENXIO);
+}
+EXPORT_SYMBOL_GPL(bpf_get_dummy_ops);
+
+void bpf_put_dummy_ops(struct bpf_dummy_ops *ops)
+{
+	bpf_module_put(ops, ops->owner);
+}
+EXPORT_SYMBOL_GPL(bpf_put_dummy_ops);
+
+static int bpf_dummy_init(struct btf *btf)
+{
+	s32 type_id;
+
+	type_id = btf_find_by_name_kind(btf, "bpf_dummy_ops_state",
+					BTF_KIND_STRUCT);
+	if (type_id < 0)
+		return -EINVAL;
+
+	dummy_ops_state = btf_type_by_id(btf, type_id);
+
+	return 0;
+}
+
+static const struct bpf_func_proto *
+bpf_dummy_ops_get_func_proto(enum bpf_func_id func_id,
+			     const struct bpf_prog *prog)
+{
+	switch (func_id) {
+	case BPF_FUNC_map_lookup_elem:
+		return &bpf_map_lookup_elem_proto;
+	default:
+		return NULL;
+	}
+}
+
+static bool bpf_dummy_ops_is_valid_access(int off, int size,
+					  enum bpf_access_type type,
+					  const struct bpf_prog *prog,
+					  struct bpf_insn_access_aux *info)
+{
+	/* a common helper ? */
+	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+
+	return btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
+					   const struct btf *btf,
+					   const struct btf_type *t, int off,
+					   int size, enum bpf_access_type atype,
+					   u32 *next_btf_id)
+{
+	size_t end;
+
+	if (atype == BPF_READ)
+		return btf_struct_access(log, btf, t, off, size, atype,
+					 next_btf_id);
+
+	if (t != dummy_ops_state) {
+		bpf_log(log, "only read is supported\n");
+		return -EACCES;
+	}
+
+	switch (off) {
+	case offsetof(struct bpf_dummy_ops_state, val):
+		end = offsetofend(struct bpf_dummy_ops_state, val);
+		break;
+	default:
+		bpf_log(log, "no write support to bpf_dummy_ops_state at off %d\n",
+			off);
+		return -EACCES;
+	}
+
+	if (off + size > end) {
+		bpf_log(log,
+			"write access at off %d with size %d beyond the member of bpf_dummy_ops_state ended at %zu\n",
+			off, size, end);
+		return -EACCES;
+	}
+
+	return NOT_INIT;
+}
+
+static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
+	.get_func_proto = bpf_dummy_ops_get_func_proto,
+	.is_valid_access = bpf_dummy_ops_is_valid_access,
+	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
+};
+
+static int bpf_dummy_check_member(const struct btf_type *t,
+				  const struct btf_member *member)
+{
+	return 0;
+}
+
+
+static int bpf_dummy_init_member(const struct btf_type *t,
+				 const struct btf_member *member,
+				 void *kdata, const void *udata)
+{
+	return 0;
+}
+
+static int bpf_dummy_reg(void *kdata)
+{
+	struct bpf_dummy_ops *ops = kdata;
+	int err = 0;
+
+	spin_lock(&bpf_dummy_ops_lock);
+	if (!bpf_dummy_ops_singletion)
+		bpf_dummy_ops_singletion = ops;
+	else
+		err = -EEXIST;
+	spin_unlock(&bpf_dummy_ops_lock);
+
+	return err;
+}
+
+static void bpf_dummy_unreg(void *kdata)
+{
+	struct bpf_dummy_ops *ops = kdata;
+
+	spin_lock(&bpf_dummy_ops_lock);
+	if (bpf_dummy_ops_singletion == ops)
+		bpf_dummy_ops_singletion = NULL;
+	else
+		WARN_ON(1);
+	spin_unlock(&bpf_dummy_ops_lock);
+}
+
+extern struct bpf_struct_ops bpf_bpf_dummy_ops;
+
+struct bpf_struct_ops bpf_bpf_dummy_ops = {
+	.verifier_ops = &bpf_dummy_verifier_ops,
+	.init = bpf_dummy_init,
+	.init_member = bpf_dummy_init_member,
+	.check_member = bpf_dummy_check_member,
+	.reg = bpf_dummy_reg,
+	.unreg = bpf_dummy_unreg,
+	.name = "bpf_dummy_ops",
+};
diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
index 7ec458ead497..6d24c75f4d70 100644
--- a/kernel/bpf/bpf_struct_ops_types.h
+++ b/kernel/bpf/bpf_struct_ops_types.h
@@ -2,6 +2,10 @@ 
 /* internal file - do not include directly */
 
 #ifdef CONFIG_BPF_JIT
+#ifdef CONFIG_BPF_DUMMY_STRUCT_OPS
+#include <linux/bpf_dummy_ops.h>
+BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
+#endif
 #ifdef CONFIG_INET
 #include <net/tcp.h>
 BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)