
block: make queue stat accounting a reference

Message ID d6819554-8632-e707-3037-773a6ee904cb@kernel.dk (mailing list archive)
State New, archived

Commit Message

Jens Axboe Dec. 15, 2021, 12:02 a.m. UTC
kyber turns on IO statistics when it is loaded on a queue, which means
that even if kyber is then later unloaded, we're still stuck with stats
enabled on the queue.

Change the accounting enabled state from a bool to an int reference count,
and pair the enable call with an equivalent disable call. This ensures that
stats get turned off again appropriately.

Signed-off-by: Jens Axboe <axboe@kernel.dk>

---
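
For illustration, here is a minimal user-space sketch (plain C with pthreads,
not kernel code) of the reference-counting pattern the patch applies to
q->stats->accounting: the stats state is turned on at the 0 -> 1 transition
and turned off at the 1 -> 0 transition, so paired enable/disable calls from
multiple users leave it consistent. All names below (stats_refcount,
stats_flag, stats_enable, stats_disable) are made up for the example.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for q->stats->accounting and QUEUE_FLAG_STATS. */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static int stats_refcount;	/* number of users that want stats on */
static bool stats_flag;		/* the queue-wide "stats enabled" state */

static void stats_enable(void)
{
	pthread_mutex_lock(&stats_lock);
	if (!stats_refcount++)		/* 0 -> 1: actually turn stats on */
		stats_flag = true;
	pthread_mutex_unlock(&stats_lock);
}

static void stats_disable(void)
{
	pthread_mutex_lock(&stats_lock);
	if (!--stats_refcount)		/* 1 -> 0: last user gone, turn off */
		stats_flag = false;
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	stats_enable();			/* first user (e.g. an I/O scheduler) */
	stats_enable();			/* a second, unrelated user */
	stats_disable();		/* first user goes away */
	printf("stats on: %d\n", stats_flag);	/* 1: one reference remains */
	stats_disable();		/* last reference dropped */
	printf("stats on: %d\n", stats_flag);	/* 0: stats fully disabled */
	return 0;
}

main() mimics two users taking a reference and releasing it; the flag only
goes off once the last reference is dropped.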

Comments

Omar Sandoval Dec. 15, 2021, 12:18 a.m. UTC | #1
On Tue, Dec 14, 2021 at 05:02:57PM -0700, Jens Axboe wrote:
> kyber turns on IO statistics when it is loaded on a queue, which means
> that even if kyber is then later unloaded, we're still stuck with stats
> enabled on the queue.
> 
> Change the accounting enabled state from a bool to an int reference count,
> and pair the enable call with an equivalent disable call. This ensures that
> stats get turned off again appropriately.
> 
> Signed-off-by: Jens Axboe <axboe@kernel.dk>

Reviewed-by: Omar Sandoval <osandov@fb.com>

Christoph Hellwig Dec. 15, 2021, 8:49 a.m. UTC | #2
On Tue, Dec 14, 2021 at 05:02:57PM -0700, Jens Axboe wrote:
> -	bool enable_accounting;
> +	int accounting;

unsigned?

Patch

diff --git a/block/blk-stat.c b/block/blk-stat.c
index efb2a80db906..2ea01b5c1aca 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -15,7 +15,7 @@ 
 struct blk_queue_stats {
 	struct list_head callbacks;
 	spinlock_t lock;
-	bool enable_accounting;
+	int accounting;
 };
 
 void blk_rq_stat_init(struct blk_rq_stat *stat)
@@ -161,7 +161,7 @@  void blk_stat_remove_callback(struct request_queue *q,
 
 	spin_lock_irqsave(&q->stats->lock, flags);
 	list_del_rcu(&cb->list);
-	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
+	if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
 		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
 	spin_unlock_irqrestore(&q->stats->lock, flags);
 
@@ -184,13 +184,24 @@  void blk_stat_free_callback(struct blk_stat_callback *cb)
 		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
 }
 
+void blk_stat_disable_accounting(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->stats->lock, flags);
+	if (!--q->stats->accounting)
+		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+	spin_unlock_irqrestore(&q->stats->lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);
+
 void blk_stat_enable_accounting(struct request_queue *q)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(&q->stats->lock, flags);
-	q->stats->enable_accounting = true;
-	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+	if (!q->stats->accounting++)
+		blk_queue_flag_set(QUEUE_FLAG_STATS, q);
 	spin_unlock_irqrestore(&q->stats->lock, flags);
 }
 EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
@@ -205,7 +216,7 @@  struct blk_queue_stats *blk_alloc_queue_stats(void)
 
 	INIT_LIST_HEAD(&stats->callbacks);
 	spin_lock_init(&stats->lock);
-	stats->enable_accounting = false;
+	stats->accounting = 0;
 
 	return stats;
 }
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 58f029af49e5..17e1eb4ec7e2 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -70,6 +70,7 @@  void blk_stat_add(struct request *rq, u64 now);
 
 /* record time/size info in request but not add a callback */
 void blk_stat_enable_accounting(struct request_queue *q);
+void blk_stat_disable_accounting(struct request_queue *q);
 
 /**
  * blk_stat_alloc_callback() - Allocate a block statistics callback.
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index fdd74a4df56f..70ff2a599ef6 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -433,6 +433,7 @@  static void kyber_exit_sched(struct elevator_queue *e)
 	int i;
 
 	del_timer_sync(&kqd->timer);
+	blk_stat_disable_accounting(kqd->q);
 
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++)
 		sbitmap_queue_free(&kqd->domain_tokens[i]);