@@ -10371,6 +10371,41 @@ BPF_CALL_3(io_bpf_queue_sqe, struct io_bpf_ctx *, bpf_ctx,
 	return !io_submit_sqe(ctx, req, sqe);
 }
 
+/*
+ * BPF helper: post a completion event to the CQ selected by @cq_idx.
+ *
+ * Fills a CQE carrying @user_data/@res/@flags under ->completion_lock,
+ * commits the CQ ring and wakes any waiters on success.  Returns 0 on
+ * success, -EINVAL if @cq_idx does not name a valid CQ, or -ENOMEM if
+ * the CQE could not be queued (CQ ring full and the overflow-entry
+ * allocation failed).
+ *
+ * NOTE(review): ->cq_nr is read without ->completion_lock; assumed
+ * immutable after ring setup -- confirm.
+ */
+BPF_CALL_5(io_bpf_emit_cqe, struct io_bpf_ctx *, bpf_ctx,
+	   u32, cq_idx,
+	   u64, user_data,
+	   s32, res,
+	   u32, flags)
+{
+	struct io_ring_ctx *ctx = bpf_ctx->ctx;
+	bool submitted;
+
+	if (unlikely(cq_idx >= ctx->cq_nr))
+		return -EINVAL;
+
+	spin_lock_irq(&ctx->completion_lock);
+	submitted = io_cqring_fill_event(ctx, user_data, res, flags, cq_idx);
+	io_commit_cqring(ctx);
+	/* aux CQE not tied to an SQE: account it so CQ/SQ bookkeeping stays balanced */
+	ctx->cq_extra++;
+	spin_unlock_irq(&ctx->completion_lock);
+	if (submitted)
+		io_cqring_ev_posted(ctx);
+
+	return submitted ? 0 : -ENOMEM;
+}
+
 const struct bpf_func_proto io_bpf_queue_sqe_proto = {
 	.func = io_bpf_queue_sqe,
 	.gpl_only = false,
@@ -10380,6 +10403,21 @@ const struct bpf_func_proto io_bpf_queue_sqe_proto = {
 	.arg3_type = ARG_CONST_SIZE,
 };
 
+/*
+ * Verifier proto for iouring_emit_cqe: a context pointer plus four
+ * scalar arguments (cq_idx, user_data, res, flags), returning an
+ * integer error code.
+ */
+const struct bpf_func_proto io_bpf_emit_cqe_proto = {
+	.func = io_bpf_emit_cqe,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_CTX,
+	.arg2_type = ARG_ANYTHING,
+	.arg3_type = ARG_ANYTHING,
+	.arg4_type = ARG_ANYTHING,
+	.arg5_type = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 io_bpf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -10388,6 +10422,9 @@ io_bpf_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
 	case BPF_FUNC_iouring_queue_sqe:
 		return prog->aux->sleepable ? &io_bpf_queue_sqe_proto : NULL;
+	case BPF_FUNC_iouring_emit_cqe:
+		/* takes a spinlock, never sleeps: no sleepable gate needed */
+		return &io_bpf_emit_cqe_proto;
 	default:
 		return bpf_base_func_proto(func_id);
 	}
@@ -4083,6 +4083,7 @@ union bpf_attr {
 	FN(sock_from_file),		\
 	FN(check_mtu),			\
 	FN(iouring_queue_sqe),		\
+	FN(iouring_emit_cqe),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c            | 36 ++++++++++++++++++++++++++++++++++++
 include/uapi/linux/bpf.h |  1 +
 2 files changed, 37 insertions(+)