@@ -356,6 +356,7 @@ struct io_ring_ctx {
unsigned sq_thread_idle;
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+ struct io_wq *let;
};
enum {
@@ -127,6 +127,8 @@ struct io_wq {
struct task_struct *task;
+ void *private;
+
struct io_wqe *wqes[];
};
@@ -392,6 +394,11 @@ static bool io_queue_worker_create(struct io_worker *worker,
return false;
}
+static inline bool io_wq_is_uringlet(struct io_wq *wq)
+{
+ return wq->private;
+}
+
static void io_wqe_dec_running(struct io_worker *worker)
{
struct io_wqe_acct *acct = io_wqe_get_acct(worker);
@@ -1153,6 +1160,7 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
wq->hash = data->hash;
wq->free_work = data->free_work;
wq->do_work = data->do_work;
+ wq->private = data->private;
ret = -ENOMEM;
for_each_node(node) {
@@ -1188,7 +1196,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
INIT_LIST_HEAD(&wqe->all_list);
}
- wq->task = get_task_struct(data->task);
+ if (data->task)
+ wq->task = get_task_struct(data->task);
atomic_set(&wq->worker_refs, 1);
init_completion(&wq->worker_done);
return wq;
@@ -41,6 +41,7 @@ struct io_wq_data {
struct task_struct *task;
io_wq_work_fn *do_work;
free_work_fn *free_work;
+ void *private;
};
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
@@ -80,4 +81,7 @@ static inline bool io_wq_current_is_worker(void)
return in_task() && (current->flags & PF_IO_WORKER) &&
current->worker_private;
}
+
+extern struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
+ struct task_struct *task);
#endif
@@ -3318,6 +3318,15 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
ret = io_sq_offload_create(ctx, p);
if (ret)
goto err;
+
+ if (ctx->flags & IORING_SETUP_URINGLET) {
+ ctx->let = io_init_wq_offload(ctx, current);
+ if (IS_ERR(ctx->let)) {
+ ret = PTR_ERR(ctx->let);
+ goto err;
+ }
+ }
+
/* always set a rsrc node */
ret = io_rsrc_node_switch_start(ctx);
if (ret)
@@ -12,7 +12,7 @@
#include "io_uring.h"
#include "tctx.h"
-static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
+struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
struct task_struct *task)
{
struct io_wq_hash *hash;
@@ -34,9 +34,15 @@ static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
mutex_unlock(&ctx->uring_lock);
data.hash = hash;
+ /* for uringlet, wq->task is the io_uring instance creator */
data.task = task;
data.free_work = io_wq_free_work;
data.do_work = io_wq_submit_work;
+ /* distinguish normal io-wq and uringlet by wq->private for now */
+ if (ctx->flags & IORING_SETUP_URINGLET)
+ data.private = ctx;
+ else
+ data.private = NULL;
/* Do QD, or 4 * CPUS, whatever is smallest */
concurrency = min(ctx->sq_entries, 4 * num_online_cpus());