@@ -145,6 +145,12 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
 {
 	struct blkcg_gq *blkg;
 
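+	/*
+	 * The caller must hold a q_usage_counter reference, which
+	 * blk_queue_enter() tracks through q_enter_map.
+	 */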
+	WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
+
 	/*
 	 * Hint didn't match. Look up from the radix tree. Note that the
 	 * hint can only be updated under queue_lock as otherwise @blkg
@@ -931,8 +931,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		}
 		rcu_read_unlock();
 
-		if (success)
+		if (success) {
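+			/*
+			 * Pair this reference with the release in
+			 * blk_queue_exit(); nesting on an already-held
+			 * map keeps recursive entry valid for lockdep.
+			 */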
+			mutex_acquire_nest(&q->q_enter_map, 0, 0,
+					   lock_is_held(&q->q_enter_map) ?
+					   &q->q_enter_map : NULL,
+					   _RET_IP_);
 			return 0;
+		}
 
 		if (flags & BLK_MQ_REQ_NOWAIT)
 			return -EBUSY;
@@ -959,6 +964,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
 void blk_queue_exit(struct request_queue *q)
 {
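+	/* Release the reference annotation taken in blk_queue_enter(). */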
+	mutex_release(&q->q_enter_map, 0, _RET_IP_);
 	percpu_ref_put(&q->q_usage_counter);
 }
 
@@ -994,12 +1000,16 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
 					   spinlock_t *lock)
 {
 	struct request_queue *q;
+	static struct lock_class_key __key;
 
 	q = kmem_cache_alloc_node(blk_requestq_cachep,
 				gfp_mask | __GFP_ZERO, node_id);
 	if (!q)
 		return NULL;
 
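+	/* All request queues share a single q_enter_map lock class. */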
+	lockdep_init_map(&q->q_enter_map, "q_enter_map", &__key, 0);
+
 	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
 	if (q->id < 0)
 		goto fail_q;
@@ -2264,6 +2273,9 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
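+	/* Callers must have entered @q via blk_queue_enter(). */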
+	WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
+
 	/*
 	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
 	 * if queue is not a request based queue.
@@ -145,6 +145,11 @@ static inline void blk_queue_enter_live(struct request_queue *q)
 	 * been established, further references under that same context
 	 * need not check that the queue has been frozen (marked dead).
 	 */
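+	/*
+	 * Nest unconditionally: the caller already holds a tracked
+	 * reference, so lockdep treats this one as recursive.
+	 */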
+	mutex_acquire_nest(&q->q_enter_map, 0, 0, &q->q_enter_map, _RET_IP_);
 	percpu_ref_get(&q->q_usage_counter);
 }
 
@@ -266,6 +266,9 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
 {
 	struct blkcg_gq *blkg;
 
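+	/* The caller must have entered @q; see blk_queue_enter(). */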
+	WARN_ON_ONCE(debug_locks && !lock_is_held(&q->q_enter_map));
+
 	if (blkcg == &blkcg_root)
 		return q->root_blkg;
 
@@ -631,6 +631,8 @@ struct request_queue {
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
 	struct percpu_ref	q_usage_counter;
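+	/* Tracks blk_queue_enter() references for lockdep checking. */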
+	struct lockdep_map	q_enter_map;
 	struct list_head	all_q_node;
 
 	struct blk_mq_tag_set	*tag_set;