@@ -1039,32 +1039,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
 }
 EXPORT_SYMBOL(kblockd_mod_delayed_work_on);
 
-void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
-{
-	struct task_struct *tsk = current;
-
-	/*
-	 * If this is a nested plug, don't actually assign it.
-	 */
-	if (tsk->plug)
-		return;
-
-	plug->mq_list = NULL;
-	plug->cached_rq = NULL;
-	plug->nr_ios = min_t(unsigned short, nr_ios, BLK_MAX_REQUEST_COUNT);
-	plug->rq_count = 0;
-	plug->multiple_queues = false;
-	plug->has_elevator = false;
-	plug->nowait = false;
-	INIT_LIST_HEAD(&plug->cb_list);
-
-	/*
-	 * Store ordering should not be needed here, since a potential
-	 * preempt will imply a full memory barrier
-	 */
-	tsk->plug = plug;
-}
-
 /**
  * blk_start_plug - initialize blk_plug and track it inside the task_struct
  * @plug: The &struct blk_plug that needs to be initialized
@@ -1090,7 +1064,28 @@ void blk_start_plug_nr_ios(struct blk_plug *plug, unsigned short nr_ios)
  */
 void blk_start_plug(struct blk_plug *plug)
 {
-	blk_start_plug_nr_ios(plug, 1);
+	struct task_struct *tsk = current;
+
+	/*
+	 * If this is a nested plug, don't actually assign it.
+	 */
+	if (tsk->plug)
+		return;
+
+	plug->mq_list = NULL;
+	plug->cached_rq = NULL;
+	plug->nr_ios = 1;
+	plug->rq_count = 0;
+	plug->multiple_queues = false;
+	plug->has_elevator = false;
+	plug->nowait = false;
+	INIT_LIST_HEAD(&plug->cb_list);
+
+	/*
+	 * Store ordering should not be needed here, since a potential
+	 * preempt will imply a full memory barrier
+	 */
+	tsk->plug = plug;
 }
 EXPORT_SYMBOL(blk_start_plug);
 
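
With blk_start_plug_nr_ios() gone, blk_start_plug() always arms the plug for a
single cached request, and a caller that knows more I/O is coming bumps
plug->nr_ios directly before issuing. A minimal caller-side sketch of that
pattern, mirroring what io_uring does further down (nr_requests and
issue_requests() are hypothetical stand-ins, not kernel APIs):

	struct blk_plug plug;
	unsigned short nr_requests = 8;	/* hypothetical batch size */
	unsigned short i;

	blk_start_plug(&plug);		/* plug.nr_ios starts at 1 */

	/* one extra cached request per additional I/O we know is coming */
	for (i = 1; i < nr_requests; i++)
		plug.nr_ios++;

	issue_requests();		/* hypothetical: submits the whole batch */
	blk_finish_plug(&plug);

The bumps have to land before the first request is allocated: blk-mq reads
nr_ios when it first fills the plug's request cache and resets it to 1.
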
@@ -995,7 +995,6 @@ struct blk_plug_cb {
 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					      void *data, int size);
 extern void blk_start_plug(struct blk_plug *);
-extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
 extern void blk_finish_plug(struct blk_plug *);
 void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
 
@@ -1011,11 +1010,6 @@ long nr_blockdev_pages(void);
 struct blk_plug {
 };
 
-static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
-					 unsigned short nr_ios)
-{
-}
-
 static inline void blk_start_plug(struct blk_plug *plug)
 {
 }
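
With the only caller converted, both the extern declaration and the
!CONFIG_BLOCK stub of blk_start_plug_nr_ios() can go from blkdev.h, leaving
blk_start_plug()/blk_finish_plug() as the sole plug entry points.
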
@@ -175,6 +175,7 @@ struct io_submit_state {
 	bool need_plug;
 	unsigned short submit_nr;
 	unsigned int cqes_count;
+	int fd;
 	struct blk_plug plug;
 	struct io_uring_cqe cqes[16];
 };
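
io_submit_state grows an fd field recording which file the current plug was
started for; io_init_req() below compares follow-up SQEs against it before
growing the batch count.
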
@@ -2276,7 +2276,11 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		if (state->need_plug && def->plug) {
 			state->plug_started = true;
 			state->need_plug = false;
-			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
+			state->fd = req->cqe.fd;
+			blk_start_plug(&state->plug);
+		} else if (state->plug_started && req->cqe.fd == state->fd &&
+			   !ctx->submit_state.link.head) {
+			state->plug.nr_ios++;
 		}
 	}
 
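
The first pluggable request still starts the plug, now via plain
blk_start_plug() (so nr_ios begins at 1), and records its fd. Every later SQE
that targets the same fd and is not part of a link chain bumps nr_ios by one,
so by the time requests are actually issued the plug advertises the real batch
size. Two reviewer-level caveats worth double-checking: the old
blk_start_plug_nr_ios() clamped nr_ios to BLK_MAX_REQUEST_COUNT and nothing
re-adds that cap here, and the else-branch does not re-check def->plug, so
non-pluggable opcodes against the same fd also inflate the count. Neither
leaks requests, since blk_finish_plug() frees whatever the cache did not use,
but both can oversize the batch allocation.
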
@@ -2337,7 +2341,8 @@ static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
 }
 
 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-				const struct io_uring_sqe *sqe)
+				const struct io_uring_sqe *sqe,
+				struct io_wq_work_list *req_list)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_submit_link *link = &ctx->submit_state.link;
@@ -2385,7 +2390,7 @@ static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 		return 0;
 	}
 
-	io_queue_sqe(req);
+	wq_list_add_tail(&req->comp_list, req_list);
 	return 0;
 }
 
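
io_submit_sqe() no longer issues the request inline. Unlinked requests are
parked on the caller-provided req_list instead, so the parse loop can finish
counting nr_ios before any I/O is started; linked requests still return
through the link-handling path above and never reach the tail-add.
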
@@ -2470,6 +2475,7 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	__must_hold(&ctx->uring_lock)
 {
 	unsigned int entries = io_sqring_entries(ctx);
+	struct io_wq_work_list req_list;
 	unsigned int left;
 	int ret;
 
@@ -2480,6 +2486,7 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	io_get_task_refs(left);
 	io_submit_state_start(&ctx->submit_state, left);
+	INIT_WQ_LIST(&req_list);
 
 	do {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
@@ -2495,13 +2502,22 @@ int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 		 * Continue submitting even for sqe failure if the
 		 * ring was setup with IORING_SETUP_SUBMIT_ALL
 		 */
-		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
+		if (unlikely(io_submit_sqe(ctx, req, sqe, &req_list)) &&
 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
 			left--;
 			break;
 		}
 	} while (--left);
 
+	while (req_list.first) {
+		struct io_kiocb *req;
+
+		req = container_of(req_list.first, struct io_kiocb, comp_list);
+		req_list.first = req->comp_list.next;
+
+		io_queue_sqe(req);
+	}
+
 	if (unlikely(left)) {
 		ret -= left;
 		/* try again if it submitted nothing and can't allocate a req */
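
Finally, io_submit_sqes() drains the deferred list once the parse loop is
done: requests pop off req_list in FIFO order (wq_list_add_tail() plus popping
from ->first preserves submission order) and go through io_queue_sqe() under
the now fully sized plug. Plug teardown is untouched by this patch, so the
plug started in io_init_req() is still finished on the existing path after
submission completes.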