Both mq_freeze_wq and mq_freeze_depth are used by the legacy path and
blk-mq, so rename them to .freeze_wq and .freeze_depth to avoid
confusing people. No functional change.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c       | 12 ++++++------
 block/blk-mq.c         | 12 ++++++------
 include/linux/blkdev.h |  4 ++--
 3 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -630,7 +630,7 @@ void blk_set_queue_dying(struct request_queue *q)
* We need to ensure that processes currently waiting on
* the queue are notified as well.
*/
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->freeze_wq);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
@@ -793,14 +793,14 @@ int blk_queue_enter(struct request_queue *q, bool nowait)
/*
* read pair of barrier in blk_freeze_queue_start(),
* we need to order reading __PERCPU_REF_DEAD flag of
- * .q_usage_counter and reading .mq_freeze_depth or
+ * .q_usage_counter and reading .freeze_depth or
* queue dying flag, otherwise the following wait may
* never return if the two reads are reordered.
*/
smp_rmb();
- ret = wait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
+ ret = wait_event_interruptible(q->freeze_wq,
+ !atomic_read(&q->freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
@@ -819,7 +819,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref)
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->freeze_wq);
}
static void blk_rq_timed_out_timer(unsigned long data)
@@ -891,7 +891,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
- init_waitqueue_head(&q->mq_freeze_wq);
+ init_waitqueue_head(&q->freeze_wq);
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
diff --git a/block/blk-mq.c b/block/blk-mq.c
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -122,7 +122,7 @@ void blk_freeze_queue_start(struct request_queue *q)
{
int freeze_depth;
- freeze_depth = atomic_inc_return(&q->mq_freeze_depth);
+ freeze_depth = atomic_inc_return(&q->freeze_depth);
if (freeze_depth == 1) {
percpu_ref_kill(&q->q_usage_counter);
if (q->mq_ops)
@@ -135,14 +135,14 @@ void blk_freeze_queue_wait(struct request_queue *q)
{
if (!q->mq_ops)
blk_drain_queue(q);
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+ wait_event(q->freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
EXPORT_SYMBOL_GPL(blk_freeze_queue_wait);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout)
{
- return wait_event_timeout(q->mq_freeze_wq,
+ return wait_event_timeout(q->freeze_wq,
percpu_ref_is_zero(&q->q_usage_counter),
timeout);
}
@@ -170,11 +170,11 @@ void blk_unfreeze_queue(struct request_queue *q)
{
int freeze_depth;
- freeze_depth = atomic_dec_return(&q->mq_freeze_depth);
+ freeze_depth = atomic_dec_return(&q->freeze_depth);
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
- wake_up_all(&q->mq_freeze_wq);
+ wake_up_all(&q->freeze_wq);
}
}
EXPORT_SYMBOL_GPL(blk_unfreeze_queue);
@@ -2424,7 +2424,7 @@ void blk_mq_free_queue(struct request_queue *q)
/* Basically redo blk_mq_init_queue with queue frozen */
static void blk_mq_queue_reinit(struct request_queue *q)
{
- WARN_ON_ONCE(!atomic_read(&q->mq_freeze_depth));
+ WARN_ON_ONCE(!atomic_read(&q->freeze_depth));
blk_mq_debugfs_unregister_hctxs(q);
blk_mq_sysfs_unregister(q);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -564,7 +564,7 @@ struct request_queue {
struct mutex sysfs_lock;
int bypass_depth;
- atomic_t mq_freeze_depth;
+ atomic_t freeze_depth;
#if defined(CONFIG_BLK_DEV_BSG)
bsg_job_fn *bsg_job_fn;
@@ -576,7 +576,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
- wait_queue_head_t mq_freeze_wq;
+ wait_queue_head_t freeze_wq;
struct percpu_ref q_usage_counter;
struct list_head all_q_node;
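A note for reviewers: the sketch below shows the freeze/unfreeze pattern
that .freeze_depth and .freeze_wq back. example_reconfigure() is a
hypothetical caller written only to illustrate how the helpers touched
by this diff pair up; it is not part of the change.

    #include <linux/blkdev.h>

    /* Hypothetical caller, for illustration only; not part of this patch. */
    static void example_reconfigure(struct request_queue *q)
    {
    	blk_freeze_queue_start(q);	/* .freeze_depth 0 -> 1: kills q->q_usage_counter */
    	blk_freeze_queue_wait(q);	/* sleeps on .freeze_wq until the ref drains to zero */

    	/*
    	 * No request can enter the queue here: blk_queue_enter()
    	 * blocks on .freeze_wq, so reconfiguration is safe.
    	 */

    	blk_unfreeze_queue(q);		/* .freeze_depth 1 -> 0: reinits the ref, wakes .freeze_wq */
    }

Since .freeze_depth is a counter, these sections nest: only the first
blk_freeze_queue_start() kills the percpu ref, and only the final
blk_unfreeze_queue() reinits it and wakes the waiters. That shared
counting scheme is what lets the legacy and blk-mq paths use the same
two fields, which is why the mq_ prefix is misleading.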