
[4/4] block/part_stat: add helper blk_account_io_merge_bio()

Message ID 158859906056.19958.10435750035306672420.stgit@buzz (mailing list archive)
State New, archived
Series [1/4] block/part_stat: remove rcu_read_lock() from part_stat_lock()

Commit Message

Konstantin Khlebnikov May 4, 2020, 1:31 p.m. UTC
Move the non-"new_io" branch of blk_account_io_start() into a separate
function. Fix merge accounting for discards (they were counted as write
merges).

Also, unlike blk_account_io_start(), blk_account_io_merge_bio() doesn't
call update_io_ticks(); there is no reason to do so on a merge.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
---
 block/blk-core.c |   39 ++++++++++++++++++++++-----------------
 block/blk-exec.c |    2 +-
 block/blk-mq.c   |    2 +-
 block/blk.h      |    2 +-
 4 files changed, 25 insertions(+), 20 deletions(-)
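
A note on the discard fix: the old merge branch indexed the merges[]
counters with rq_data_dir(), which only distinguishes READ from WRITE,
so a discard (a write-direction op) was counted in the write bucket.
The new helper indexes by op_stat_group() instead, which gives discards
their own stat group. A minimal illustration, not part of the patch:

	int rw = rq_data_dir(rq);                   /* REQ_OP_DISCARD -> WRITE */
	const int sgrp = op_stat_group(req_op(rq)); /* REQ_OP_DISCARD -> STAT_DISCARD */

	part_stat_inc(rq->part, merges[rw]);   /* old: discard counted as a write merge */
	part_stat_inc(rq->part, merges[sgrp]); /* new: discard counted in its own group */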

Comments

Christoph Hellwig May 4, 2020, 2:06 p.m. UTC | #1
On Mon, May 04, 2020 at 04:31:04PM +0300, Konstantin Khlebnikov wrote:
> Move the non-"new_io" branch of blk_account_io_start() into a separate
> function. Fix merge accounting for discards (they were counted as write
> merges).
> 
> Also, unlike blk_account_io_start(), blk_account_io_merge_bio() doesn't
> call update_io_ticks(); there is no reason to do so on a merge.
> 
> Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>

Looks good,

Reviewed-by: Christoph Hellwig <hch@lst.de>

Nitpick below:

> +void blk_account_io_start(struct request *rq)
>  {
>  	struct hd_struct *part;
>  	int rw = rq_data_dir(rq);
>  
> +	if (blk_do_io_stat(rq)) {

part and rw probably should move inside this branch.
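
That is, the declarations would move into the blk_do_io_stat() branch,
roughly like this (a sketch of the nitpick only, not the applied patch;
the elided body is the lookup and accounting from the hunk below):

	void blk_account_io_start(struct request *rq)
	{
		if (blk_do_io_stat(rq)) {
			struct hd_struct *part;
			int rw = rq_data_dir(rq);

			part_stat_lock();
			/* ... disk_map_sector_rcu() lookup, in-flight and
			 * io_ticks accounting as in the hunk below ...
			 */
			part_stat_unlock();
		}
	}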

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index 45ddf7238c06..18fb42eb2f18 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -622,6 +622,17 @@ void blk_put_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_put_request);
 
+static void blk_account_io_merge_bio(struct request *req)
+{
+	if (blk_do_io_stat(req)) {
+		const int sgrp = op_stat_group(req_op(req));
+
+		part_stat_lock();
+		part_stat_inc(req->part, merges[sgrp]);
+		part_stat_unlock();
+	}
+}
+
 bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 		unsigned int nr_segs)
 {
@@ -640,7 +651,7 @@ bool bio_attempt_back_merge(struct request *req, struct bio *bio,
 	req->biotail = bio;
 	req->__data_len += bio->bi_iter.bi_size;
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 }
 
@@ -664,7 +675,7 @@ bool bio_attempt_front_merge(struct request *req, struct bio *bio,
 	req->__sector = bio->bi_iter.bi_sector;
 	req->__data_len += bio->bi_iter.bi_size;
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 }
 
@@ -686,7 +697,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 	req->__data_len += bio->bi_iter.bi_size;
 	req->nr_phys_segments = segments + 1;
 
-	blk_account_io_start(req, false);
+	blk_account_io_merge_bio(req);
 	return true;
 no_merge:
 	req_set_nomerge(q, req);
@@ -1258,7 +1269,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
 		return BLK_STS_IOERR;
 
 	if (blk_queue_io_stat(q))
-		blk_account_io_start(rq, true);
+		blk_account_io_start(rq);
 
 	/*
 	 * Since we have a scheduler attached on the top device,
@@ -1348,20 +1359,14 @@ void blk_account_io_done(struct request *req, u64 now)
 	}
 }
 
-void blk_account_io_start(struct request *rq, bool new_io)
+void blk_account_io_start(struct request *rq)
 {
 	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 
-	if (!blk_do_io_stat(rq))
-		return;
-
-	part_stat_lock();
+	if (blk_do_io_stat(rq)) {
+		part_stat_lock();
 
-	if (!new_io) {
-		part = rq->part;
-		part_stat_inc(part, merges[rw]);
-	} else {
 		rcu_read_lock();
 		part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 		if (!hd_struct_try_get(part)) {
@@ -1378,13 +1383,13 @@ void blk_account_io_start(struct request *rq, bool new_io)
 		}
 		rcu_read_unlock();
 
-		part_inc_in_flight(rq->q, part, rw);
 		rq->part = part;
-	}
 
-	update_io_ticks(part, jiffies, false);
+		part_inc_in_flight(rq->q, part, rw);
+		update_io_ticks(part, jiffies, false);
 
-	part_stat_unlock();
+		part_stat_unlock();
+	}
 }
 
 /*
diff --git a/block/blk-exec.c b/block/blk-exec.c
index e20a852ae432..85324d53d072 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -55,7 +55,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
 
-	blk_account_io_start(rq, true);
+	blk_account_io_start(rq);
 
 	/*
 	 * don't check dying flag for MQ because the request won't
diff --git a/block/blk-mq.c b/block/blk-mq.c
index bcc3a2397d4a..049c4f9417c3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1794,7 +1794,7 @@ static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 	rq->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(rq, bio, nr_segs);
 
-	blk_account_io_start(rq, true);
+	blk_account_io_start(rq);
 }
 
 static blk_status_t __blk_mq_issue_directly(struct blk_mq_hw_ctx *hctx,
diff --git a/block/blk.h b/block/blk.h
index 73bd3b1c6938..06cd57cc10fb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -195,7 +195,7 @@ bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
 		unsigned int nr_segs, struct request **same_queue_rq);
 
-void blk_account_io_start(struct request *req, bool new_io);
+void blk_account_io_start(struct request *req);
 void blk_account_io_completion(struct request *req, unsigned int bytes);
 void blk_account_io_done(struct request *req, u64 now);