@@ -771,6 +771,36 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
io_wqe_dec_running(worker);
}
+/*
+ * io_uringlet_offload - wake or create an io-wq worker for uringlet mode
+ * @wq: the io-wq instance this ring offloads work to
+ *
+ * Prefer waking an already-running free worker on the current NUMA node,
+ * scanning under wqe->lock and the RCU read lock.  Only when no free
+ * worker could be woken, fall back to creating a new worker.
+ *
+ * Return: 0 if a free worker was woken; otherwise the result of
+ * io_wqe_create_worker() (NOTE(review): presumably 0/-errno -- confirm
+ * the return convention of io_wqe_create_worker() in this series).
+ */
+int io_uringlet_offload(struct io_wq *wq)
+{
+ struct io_wqe *wqe = wq->wqes[numa_node_id()];
+ struct io_wqe_acct *acct = io_get_acct(wqe, true);
+ bool waken;
+
+ raw_spin_lock(&wqe->lock);
+ rcu_read_lock();
+ waken = io_wqe_activate_free_worker(wqe, acct);
+ rcu_read_unlock();
+ raw_spin_unlock(&wqe->lock);
+
+ if (waken)
+ return 0;
+
+ return io_wqe_create_worker(wqe, acct);
+}
+
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
struct task_struct *tsk)
{
@@ -90,4 +90,5 @@ static inline bool io_wq_current_is_worker(void)
extern struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
struct task_struct *task);
+extern int io_uringlet_offload(struct io_wq *wq);
#endif
@@ -3051,15 +3051,22 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
if (unlikely(ret))
goto out;
- mutex_lock(&ctx->uring_lock);
- ret = io_submit_sqes(ctx, to_submit);
- if (ret != to_submit) {
+ if (!(ctx->flags & IORING_SETUP_URINGLET)) {
+ mutex_lock(&ctx->uring_lock);
+ ret = io_submit_sqes(ctx, to_submit);
+ if (ret != to_submit) {
+ mutex_unlock(&ctx->uring_lock);
+ goto out;
+ }
+ if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
+ goto iopoll_locked;
mutex_unlock(&ctx->uring_lock);
- goto out;
+ } else {
+ ret = io_uringlet_offload(ctx->let);
+ if (ret)
+ goto out;
+ ret = to_submit;
}
- if ((flags & IORING_ENTER_GETEVENTS) && ctx->syscall_iopoll)
- goto iopoll_locked;
- mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
int ret2;