@@ -90,9 +90,11 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
 	return atomic_read(&hctx->nr_active) < depth;
 }
 
-static int __blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
+static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
+			    struct sbitmap_queue *bt)
 {
-	if (!hctx_may_queue(hctx, bt))
+	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
+	    !hctx_may_queue(data->hctx, bt))
 		return -1;
 	return __sbitmap_queue_get(bt);
 }
@@ -118,7 +120,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		tag_offset = tags->nr_reserved_tags;
 	}
 
-	tag = __blk_mq_get_tag(data->hctx, bt);
+	tag = __blk_mq_get_tag(data, bt);
 	if (tag != -1)
 		goto found_tag;
 
@@ -129,7 +131,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
 
@@ -144,7 +146,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		 * Retry tag allocation after running the hardware queue,
 		 * as running the queue may also have found completions.
 		 */
-		tag = __blk_mq_get_tag(data->hctx, bt);
+		tag = __blk_mq_get_tag(data, bt);
 		if (tag != -1)
 			break;
 
@@ -230,15 +230,14 @@ struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
 
 		rq = tags->static_rqs[tag];
 
-		if (blk_mq_tag_busy(data->hctx)) {
-			rq->rq_flags = RQF_MQ_INFLIGHT;
-			atomic_inc(&data->hctx->nr_active);
-		}
-
 		if (data->flags & BLK_MQ_REQ_INTERNAL) {
 			rq->tag = -1;
 			rq->internal_tag = tag;
 		} else {
+			if (blk_mq_tag_busy(data->hctx)) {
+				rq->rq_flags = RQF_MQ_INFLIGHT;
+				atomic_inc(&data->hctx->nr_active);
+			}
 			rq->tag = tag;
 			rq->internal_tag = -1;
 		}
@@ -869,6 +868,10 @@ static bool blk_mq_get_driver_tag(struct request *rq,
 
 	rq->tag = blk_mq_get_tag(&data);
 	if (rq->tag >= 0) {
+		if (blk_mq_tag_busy(data.hctx)) {
+			rq->rq_flags |= RQF_MQ_INFLIGHT;
+			atomic_inc(&data.hctx->nr_active);
+		}
 		data.hctx->tags->rqs[rq->tag] = rq;
 		goto done;
 	}