@@ -14,6 +14,22 @@
#include "blk-mq.h"
#include "blk-mq-sched.h"
+static void blk_mq_update_available_driver_tags(struct blk_mq_hw_ctx *hctx)
+{
+ struct blk_mq_tags *tags = hctx->tags;
+ unsigned int nr_tags;
+ struct tag_sharing *tag_sharing;
+
+ if (tags->ctl.share_queues <= 1)
+ nr_tags = tags->nr_tags;
+ else
+ nr_tags = max((tags->nr_tags + tags->ctl.share_queues - 1) /
+ tags->ctl.share_queues, 4U);
+
+ list_for_each_entry(tag_sharing, &tags->ctl.head, node)
+ tag_sharing->available_tags = nr_tags;
+}
+
/*
* Recalculate wakeup batch when tag is shared by hctx.
*/
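
The helper above (apparently in block/blk-mq-tag.c, judging by the surrounding functions) divides the driver-tag space evenly among the queues currently sharing the tag set: each sharer gets the round-up of nr_tags / share_queues, never fewer than 4 tags, and a lone or unshared queue keeps the whole depth. A minimal stand-alone sketch of that budgeting rule, with a made-up function name and plain user-space C rather than kernel code:

    #include <stdio.h>

    /*
     * Hypothetical model of the budgeting rule above; the function name is
     * made up and this is user-space C, not kernel code.
     */
    static unsigned int fair_share(unsigned int nr_tags, unsigned int share_queues)
    {
        unsigned int share;

        /* A lone (or not yet shared) queue keeps the whole tag space. */
        if (share_queues <= 1)
            return nr_tags;

        /* Round-up division, but always allow at least 4 tags per queue. */
        share = (nr_tags + share_queues - 1) / share_queues;
        return share < 4 ? 4 : share;
    }

    int main(void)
    {
        printf("%u\n", fair_share(32, 3)); /* 11: ceil(32 / 3) */
        printf("%u\n", fair_share(8, 4));  /*  4: ceil(8 / 4) is 2, floor wins */
        return 0;
    }

So a 32-tag set shared by 3 queues yields 11 tags per queue, while an 8-tag set shared by 4 queues is held at the floor of 4.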
@@ -51,6 +67,7 @@ void __blk_mq_driver_tag_busy(struct blk_mq_hw_ctx *hctx)
spin_lock_irq(&tags->lock);
WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
+ blk_mq_update_available_driver_tags(hctx);
blk_mq_update_wake_batch(tags, tags->ctl.share_queues);
spin_unlock_irq(&tags->lock);
}
@@ -136,9 +153,11 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
spin_lock_irq(&tags->lock);
list_del_init(&tag_sharing->node);
+ tag_sharing->available_tags = tags->nr_tags;
__blk_mq_driver_tag_idle(hctx);
WRITE_ONCE(tags->ctl.active_queues, tags->ctl.active_queues - 1);
WRITE_ONCE(tags->ctl.share_queues, tags->ctl.active_queues);
+ blk_mq_update_available_driver_tags(hctx);
blk_mq_update_wake_batch(tags, tags->ctl.share_queues);
spin_unlock_irq(&tags->lock);
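
Both update paths run with tags->lock held: __blk_mq_driver_tag_busy() republishes share_queues from active_queues when a queue starts competing for tags, while __blk_mq_tag_idle() unlinks the departing queue from the sharing list and restores its budget to the full nr_tags before the counters drop; in both cases the remaining sharers' budgets and the wake batch are then recomputed through the new helper. The next three hunks, which appear to be in block/blk-mq.c, seed the new field at initialization time.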
@@ -3621,6 +3621,7 @@ static int blk_mq_init_hctx(struct request_queue *q,
cpuhp_state_add_instance_nocalls(CPUHP_BLK_MQ_DEAD, &hctx->cpuhp_dead);
hctx->tags = set->tags[hctx_idx];
+ hctx->tag_sharing.available_tags = hctx->tags->nr_tags;
if (set->ops->init_hctx &&
set->ops->init_hctx(hctx, set->driver_data, hctx_idx))
@@ -3881,6 +3882,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
}
hctx->tags = set->tags[i];
+ hctx->tag_sharing.available_tags = hctx->tags->nr_tags;
WARN_ON(!hctx->tags);
/*
@@ -4234,6 +4236,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
spin_lock_init(&q->requeue_lock);
q->nr_requests = set->queue_depth;
+ q->tag_sharing.available_tags = set->queue_depth;
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
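
All three initialization sites start available_tags at the full depth (the hardware context's nr_tags, or set->queue_depth for the per-request-queue copy), so a queue whose tag set is not shared is never throttled by the new accounting. The remaining hunks, apparently block/blk-mq.h and include/linux/blkdev.h, consume and declare the field.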
@@ -398,7 +398,7 @@ static inline void blk_mq_free_requests(struct list_head *list)
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
struct sbitmap_queue *bt)
{
- unsigned int depth, users;
+ unsigned int depth;
if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED))
return true;
@@ -414,19 +414,15 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
return true;
+
+ depth = READ_ONCE(q->tag_sharing.available_tags);
} else {
if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
return true;
- }
- users = READ_ONCE(hctx->tags->ctl.share_queues);
- if (!users)
- return true;
+ depth = READ_ONCE(hctx->tag_sharing.available_tags);
+ }
- /*
- * Allow at least some tags
- */
- depth = max((bt->sb.depth + users - 1) / users, 4U);
return __blk_mq_active_requests(hctx) < depth;
}
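
hctx_may_queue() no longer recomputes the limit from the bitmap passed in on every call; it simply compares the number of active requests against the budget cached in struct tag_sharing (the per-request-queue copy when the whole tag set is shared across hardware queues, the per-hctx copy otherwise). The round-up division and the "allow at least some tags" floor of 4 now live in blk_mq_update_available_driver_tags() and are paid only when share_queues changes. One nuance worth noting: the budget is derived from tags->nr_tags (or set->queue_depth) rather than from bt->sb.depth of whichever bitmap is being checked, which may differ when scheduler tags are involved. A rough stand-alone model of the old and new checks (hypothetical names, user-space only), showing they agree once the cached budget is current:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical user-space models of the old and new checks above. */
    static bool old_may_queue(unsigned int active, unsigned int bt_depth,
                              unsigned int share_queues)
    {
        unsigned int depth;

        if (!share_queues)
            return true;
        /* Recomputed on every allocation attempt in the old code. */
        depth = (bt_depth + share_queues - 1) / share_queues;
        if (depth < 4)
            depth = 4;
        return active < depth;
    }

    static bool new_may_queue(unsigned int active, unsigned int available_tags)
    {
        /* The division already happened when share_queues last changed. */
        return active < available_tags;
    }

    int main(void)
    {
        /* 32 driver tags shared by 3 queues: both variants cut off at 11. */
        printf("%d %d\n", old_may_queue(10, 32, 3), new_may_queue(10, 11));
        printf("%d %d\n", old_may_queue(11, 32, 3), new_may_queue(11, 11));
        return 0;
    }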
@@ -376,7 +376,8 @@ struct blk_independent_access_ranges {
};
struct tag_sharing {
- struct list_head node;
+ struct list_head node;
+ unsigned int available_tags;
};
struct request_queue {
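
struct tag_sharing thus carries the cached budget alongside its list linkage; the hunks above read and initialize it through both q->tag_sharing and hctx->tag_sharing, with the sharing list itself anchored at tags->ctl.head. The code that links these nodes onto the list is not part of these hunks.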