@@ -349,11 +349,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->mq_ctx = ctx;
rq->mq_hctx = hctx;
rq->cmd_flags = data->cmd_flags;
-
- if (data->flags & BLK_MQ_REQ_PM)
- data->rq_flags |= RQF_PM;
- if (blk_queue_io_stat(q))
- data->rq_flags |= RQF_IO_STAT;
rq->rq_flags = data->rq_flags;

if (data->rq_flags & RQF_SCHED_TAGS) {
@@ -447,6 +442,15 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
if (data->cmd_flags & REQ_NOWAIT)
data->flags |= BLK_MQ_REQ_NOWAIT;

+ if (data->flags & BLK_MQ_REQ_RESERVED)
+ data->rq_flags |= RQF_RESV;
+
+ if (data->flags & BLK_MQ_REQ_PM)
+ data->rq_flags |= RQF_PM;
+
+ if (blk_queue_io_stat(q))
+ data->rq_flags |= RQF_IO_STAT;
+
if (q->elevator) {
/*
* All requests use scheduler tags when an I/O scheduler is
@@ -471,14 +475,15 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
}

retry:
- data->ctx = blk_mq_get_ctx(q);
- data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+ /* See blk_mq_alloc_request_hctx() for details */
+ if (!data->ctx) {
+ data->ctx = blk_mq_get_ctx(q);
+ data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
+ }
+
if (!(data->rq_flags & RQF_SCHED_TAGS))
blk_mq_tag_busy(data->hctx);

- if (data->flags & BLK_MQ_REQ_RESERVED)
- data->rq_flags |= RQF_RESV;
-
/*
* Try batched alloc if we want more than 1 tag.
*/
@@ -505,6 +510,7 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
* is going away.
*/
msleep(3);
+ data->ctx = NULL;
goto retry;
}

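For reference, a sketch of how the retry path of __blk_mq_alloc_requests() reads with the two hunks above applied, reconstructed from this diff (the tag allocation between the fragments is elided). The ctx/hctx lookup is now skipped when the caller has already chosen a software queue, so the sleep-and-retry path clears data->ctx to keep the old behaviour of re-deriving ctx and hctx on every retry:

retry:
	/* See blk_mq_alloc_request_hctx() for details */
	if (!data->ctx) {
		data->ctx = blk_mq_get_ctx(q);
		data->hctx = blk_mq_map_queue(q, data->cmd_flags, data->ctx);
	}

	if (!(data->rq_flags & RQF_SCHED_TAGS))
		blk_mq_tag_busy(data->hctx);

	/* ... tag allocation; if no tag is free and the caller may block ... */
	msleep(3);
	data->ctx = NULL;	/* re-derive ctx/hctx on the next pass */
	goto retry;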
@@ -613,16 +619,10 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
.cmd_flags = opf,
.nr_tags = 1,
};
- u64 alloc_time_ns = 0;
struct request *rq;
unsigned int cpu;
- unsigned int tag;
int ret;

- /* alloc_time includes depth and tag waits */
- if (blk_queue_rq_alloc_time(q))
- alloc_time_ns = ktime_get_ns();
-
/*
* If the tag allocator sleeps we could get an allocation for a
* different hardware context. No need to complicate the low level
@@ -653,20 +653,10 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
goto out_queue_exit;
data.ctx = __blk_mq_get_ctx(q, cpu);

- if (q->elevator)
- data.rq_flags |= RQF_SCHED_TAGS;
- else
- blk_mq_tag_busy(data.hctx);
-
- if (flags & BLK_MQ_REQ_RESERVED)
- data.rq_flags |= RQF_RESV;
-
ret = -EWOULDBLOCK;
- tag = blk_mq_get_tag(&data);
- if (tag == BLK_MQ_NO_TAG)
+ rq = __blk_mq_alloc_requests(&data);
+ if (!rq)
goto out_queue_exit;
- rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
- alloc_time_ns);
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
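To illustrate the caller side, here is a minimal, hypothetical sketch of allocating a reserved passthrough request pinned to a specific hardware queue via blk_mq_alloc_request_hctx() (callers such as NVMe over Fabrics use it this way for per-queue connect commands). The helper name and the choice of REQ_OP_DRV_OUT are illustrative, not taken from this patch; per the comment in the hunk above, the tag allocator must not sleep here, hence BLK_MQ_REQ_NOWAIT:

#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Hypothetical helper: allocate a reserved passthrough request that is
 * guaranteed to be issued on hardware queue @hctx_idx.
 */
static struct request *example_alloc_on_hctx(struct request_queue *q,
					      unsigned int hctx_idx)
{
	struct request *rq;

	rq = blk_mq_alloc_request_hctx(q, REQ_OP_DRV_OUT,
				       BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED,
				       hctx_idx);
	if (IS_ERR(rq))
		return rq;	/* e.g. ERR_PTR(-EWOULDBLOCK) when no tag is free */

	/* fill in the passthrough command and submit it here */
	return rq;
}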