@@ -30,6 +30,7 @@
#include <linux/static_call.h>
#include <linux/memcontrol.h>
#include <linux/cfi.h>
+#include <linux/io_uring/bpf.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@@ -83,6 +83,10 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
BPF_PROG_TYPE(BPF_PROG_TYPE_NETFILTER, netfilter,
struct bpf_nf_ctx, struct bpf_nf_ctx)
#endif
+#ifdef CONFIG_IO_URING
+BPF_PROG_TYPE(BPF_PROG_TYPE_IOURING, bpf_io_uring,
+ struct io_uring_bpf_ctx, struct io_bpf_ctx_kern)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
diff --git a/include/linux/io_uring/bpf.h b/include/linux/io_uring/bpf.h
new file mode 100644
--- /dev/null
+++ b/include/linux/io_uring/bpf.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#ifndef _LINUX_IO_URING_BPF_H
+#define _LINUX_IO_URING_BPF_H
+
+#include <uapi/linux/io_uring/bpf.h>
+
+struct io_bpf_ctx_kern {
+};
+
+#endif
@@ -1055,6 +1055,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
BPF_PROG_TYPE_NETFILTER,
+ BPF_PROG_TYPE_IOURING,
__MAX_BPF_PROG_TYPE
};
diff --git a/include/uapi/linux/io_uring/bpf.h b/include/uapi/linux/io_uring/bpf.h
new file mode 100644
--- /dev/null
+++ b/include/uapi/linux/io_uring/bpf.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Header file for the io_uring bpf interface.
+ *
+ * Copyright (C) 2024 Pavel Begunkov
+ */
+#ifndef LINUX_IO_URING_BPF_H
+#define LINUX_IO_URING_BPF_H
+
+#include <linux/types.h>
+
+enum {
+ IOU_BPF_RET_OK,
+ IOU_BPF_RET_STOP,
+
+ __IOU_BPF_RET_MAX,
+};
+
+struct io_uring_bpf_ctx {
+};
+
+#endif
@@ -17,3 +17,4 @@ obj-$(CONFIG_IO_URING) += io_uring.o opdef.o kbuf.o rsrc.o notif.o \
obj-$(CONFIG_IO_WQ) += io-wq.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_NET_RX_BUSY_POLL) += napi.o
+obj-$(CONFIG_BPF) += bpf.o
diff --git a/io_uring/bpf.c b/io_uring/bpf.c
new file mode 100644
--- /dev/null
+++ b/io_uring/bpf.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+
+static const struct bpf_func_proto *
+io_bpf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ return bpf_base_func_proto(func_id, prog);
+}
+
+static bool io_bpf_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ return false;
+}
+
+const struct bpf_prog_ops bpf_io_uring_prog_ops = {};
+
+const struct bpf_verifier_ops bpf_io_uring_verifier_ops = {
+ .get_func_proto = io_bpf_func_proto,
+ .is_valid_access = io_bpf_is_valid_access,
+};
@@ -219,6 +219,7 @@ enum btf_kfunc_hook {
BTF_KFUNC_HOOK_LWT,
BTF_KFUNC_HOOK_NETFILTER,
BTF_KFUNC_HOOK_KPROBE,
+ BTF_KFUNC_HOOK_IOURING,
BTF_KFUNC_HOOK_MAX,
};
@@ -8393,6 +8394,8 @@ static int bpf_prog_type_to_kfunc_hook(enum bpf_prog_type prog_type)
return BTF_KFUNC_HOOK_NETFILTER;
case BPF_PROG_TYPE_KPROBE:
return BTF_KFUNC_HOOK_KPROBE;
+ case BPF_PROG_TYPE_IOURING:
+ return BTF_KFUNC_HOOK_IOURING;
default:
return BTF_KFUNC_HOOK_MAX;
}
@@ -2571,6 +2571,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
return -EINVAL;
case BPF_PROG_TYPE_SYSCALL:
case BPF_PROG_TYPE_EXT:
+ case BPF_PROG_TYPE_IOURING:
if (expected_attach_type)
return -EINVAL;
fallthrough;
@@ -15946,6 +15946,9 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char
case BPF_PROG_TYPE_NETFILTER:
range = retval_range(NF_DROP, NF_ACCEPT);
break;
+ case BPF_PROG_TYPE_IOURING:
+ range = retval_range(IOU_BPF_RET_OK, __IOU_BPF_RET_MAX - 1);
+ break;
case BPF_PROG_TYPE_EXT:
/* freplace program can return anything as its return value
* depends on the to-be-replaced kernel func or bpf program.
@@ -22209,7 +22212,8 @@ static bool can_be_sleepable(struct bpf_prog *prog)
}
return prog->type == BPF_PROG_TYPE_LSM ||
prog->type == BPF_PROG_TYPE_KPROBE /* only for uprobes */ ||
- prog->type == BPF_PROG_TYPE_STRUCT_OPS;
+ prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
+ prog->type == BPF_PROG_TYPE_IOURING;
}
static int check_attach_btf_id(struct bpf_verifier_env *env)
@@ -22229,6 +22233,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
verbose(env, "Syscall programs can only be sleepable\n");
return -EINVAL;
}
+ if (prog->type == BPF_PROG_TYPE_IOURING && !prog->sleepable) {
+ verbose(env, "io_uring programs can only be sleepable\n");
+ return -EINVAL;
+ }
if (prog->sleepable && !can_be_sleepable(prog)) {
verbose(env, "Only fentry/fexit/fmod_ret, lsm, iter, uprobe, and struct_ops programs can be sleepable\n");
Add a new BPF program type and a bare-minimum implementation that will be
responsible for orchestrating in-kernel request handling in the io_uring
waiting loop. The program is supposed to replace the logic that terminates
the traditional waiting loop based on a number of parameters, such as the
number of completion events to wait for, and it returns one of the
IOU_BPF_RET_* return codes telling the kernel whether it should return to
user space or continue waiting.

At the moment there is no way to attach the program anywhere, and it is not
of much use yet since it doesn't know how to interact with io_uring.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 include/linux/bpf.h               |  1 +
 include/linux/bpf_types.h         |  4 ++++
 include/linux/io_uring/bpf.h      | 10 ++++++++++
 include/uapi/linux/bpf.h          |  1 +
 include/uapi/linux/io_uring/bpf.h | 22 ++++++++++++++++++++++
 io_uring/Makefile                 |  1 +
 io_uring/bpf.c                    | 24 ++++++++++++++++++++++++
 kernel/bpf/btf.c                  |  3 +++
 kernel/bpf/syscall.c              |  1 +
 kernel/bpf/verifier.c             | 10 +++++++++-
 10 files changed, 76 insertions(+), 1 deletion(-)
 create mode 100644 include/linux/io_uring/bpf.h
 create mode 100644 include/uapi/linux/io_uring/bpf.h
 create mode 100644 io_uring/bpf.c
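For illustration only, here is a rough sketch of what a program of the new type
might eventually look like from the BPF side, assuming a libbpf-style build.
The "io_uring.s" section name, the wait_loop() program name, and the locally
mirrored enum/context definitions are assumptions, not part of this patch;
since there is no attach path yet and struct io_uring_bpf_ctx is still empty,
nothing below can actually be loaded or run today.

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical sketch, not part of this patch. */
#include <bpf/bpf_helpers.h>

/* Mirrors the IOU_BPF_RET_* codes from include/uapi/linux/io_uring/bpf.h */
enum {
	IOU_BPF_RET_OK,		/* keep waiting for completions */
	IOU_BPF_RET_STOP,	/* stop waiting, return to user space */
};

/* The context is still empty in this patch. */
struct io_uring_bpf_ctx {};

/* Section name is a guess; per the verifier, the program must be sleepable. */
SEC("io_uring.s")
int wait_loop(struct io_uring_bpf_ctx *ctx)
{
	/*
	 * A real program would inspect ring/CQ state (via the context or
	 * future kfuncs) and decide whether the kernel should keep waiting
	 * (IOU_BPF_RET_OK) or wake the waiting task (IOU_BPF_RET_STOP).
	 */
	return IOU_BPF_RET_STOP;
}

char LICENSE[] SEC("license") = "GPL";

Whatever such a program ends up doing, its return value would have to stay
within the [IOU_BPF_RET_OK, __IOU_BPF_RET_MAX - 1] range enforced by the
check_return_code() change above.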