@@ -18,9 +18,8 @@
#include "blk-mq-tag.h"
#include "blk-wbt.h"
-void blk_mq_sched_assign_ioc(struct request *rq)
+struct io_cq *blk_mq_sched_lookup_icq(struct request_queue *q)
{
- struct request_queue *q = rq->q;
struct io_context *ioc;
struct io_cq *icq;
@@ -29,17 +28,20 @@ void blk_mq_sched_assign_ioc(struct request *rq)
*/
ioc = current->io_context;
if (!ioc)
- return;
+ return NULL;
spin_lock_irq(&q->queue_lock);
icq = ioc_lookup_icq(ioc, q);
spin_unlock_irq(&q->queue_lock);
+ if (icq)
+ return icq;
+ return ioc_create_icq(ioc, q, GFP_ATOMIC);
+}
- if (!icq) {
- icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
- if (!icq)
- return;
- }
+void blk_mq_sched_assign_ioc(struct request *rq, struct io_cq *icq)
+{
+ if (!icq)
+ return;
get_io_context(icq->ioc);
rq->elv.icq = icq;
}
@@ -7,7 +7,8 @@
#define MAX_SCHED_RQ (16 * BLKDEV_MAX_RQ)
-void blk_mq_sched_assign_ioc(struct request *rq);
+struct io_cq *blk_mq_sched_lookup_icq(struct request_queue *q);
+void blk_mq_sched_assign_ioc(struct request *rq, struct io_cq *icq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
@@ -333,9 +333,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->elv.icq = NULL;
if (e && e->type->ops.prepare_request) {
- if (e->type->icq_cache)
- blk_mq_sched_assign_ioc(rq);
-
+ blk_mq_sched_assign_ioc(rq, data->icq);
e->type->ops.prepare_request(rq);
rq->rq_flags |= RQF_ELVPRIV;
}
@@ -360,6 +358,9 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
data->flags |= BLK_MQ_REQ_NOWAIT;
if (e) {
+ if (!op_is_flush(data->cmd_flags) && e->type->icq_cache &&
+ e->type->ops.prepare_request)
+ data->icq = blk_mq_sched_lookup_icq(q);
/*
* Flush/passthrough requests are special and go directly to the
* dispatch list. Don't include reserved tags in the
@@ -151,6 +151,7 @@ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
struct blk_mq_alloc_data {
/* input parameter */
struct request_queue *q;
+ struct io_cq *icq;
blk_mq_req_flags_t flags;
unsigned int shallow_depth;
unsigned int cmd_flags;
Currently we look up the ICQ only after the request is allocated. However, BFQ will want to decide how many scheduler tags it allows a given bfq queue (effectively a process) to consume based on cgroup weight. So look up the ICQ earlier and provide it in struct blk_mq_alloc_data so that BFQ can use it. Signed-off-by: Jan Kara <jack@suse.cz> --- block/blk-mq-sched.c | 18 ++++++++++-------- block/blk-mq-sched.h | 3 ++- block/blk-mq.c | 7 ++++--- block/blk-mq.h | 1 + 4 files changed, 17 insertions(+), 12 deletions(-)