
[3/7] blk-throttle: Split throtl_charge_bio() into bps and iops functions

Message ID 20250414132731.167620-4-wozizhi@huawei.com
State New
Series blk-throttle: Split the blkthrotl queue to solve the IO delay issue

Commit Message

Zizhi Wo April 14, 2025, 1:27 p.m. UTC
Split throtl_charge_bio() into throtl_charge_bps_bio() and
throtl_charge_iops_bio() so that subsequent patches can charge bps and
iops separately after the queue is split.

Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 block/blk-throttle.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)
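
For readers following the series, here is a minimal userspace sketch of the
accounting the two new helpers perform. It is an illustration only, not kernel
code and not part of this patch: the model_* names, the READ/WRITE indices and
the bps_throttled field are stand-ins for struct throtl_grp,
throtl_bio_data_size() and bio_flagged(bio, BIO_BPS_THROTTLED).

/*
 * Minimal userspace model of the split charging introduced by this patch.
 * "model_tg" stands in for struct throtl_grp, "size" for
 * throtl_bio_data_size(), and "bps_throttled" for BIO_BPS_THROTTLED.
 */
#include <stdbool.h>
#include <stdio.h>

enum { READ = 0, WRITE = 1 };

struct model_bio {
	int dir;		/* READ or WRITE */
	unsigned int size;	/* payload bytes */
	bool bps_throttled;	/* bio already charged against the bps limit */
};

struct model_tg {
	unsigned long long bytes_disp[2];	/* bytes dispatched, per direction */
	unsigned int io_disp[2];		/* IOs dispatched, per direction */
};

/* Mirrors throtl_charge_bps_bio(): charge the bytes only once per bio. */
static void charge_bps(struct model_tg *tg, const struct model_bio *bio)
{
	if (!bio->bps_throttled)
		tg->bytes_disp[bio->dir] += bio->size;
}

/* Mirrors throtl_charge_iops_bio(): every dispatch counts as one IO. */
static void charge_iops(struct model_tg *tg, const struct model_bio *bio)
{
	tg->io_disp[bio->dir]++;
}

int main(void)
{
	struct model_tg tg = { 0 };
	struct model_bio a = { WRITE, 4096, false };
	struct model_bio b = { WRITE, 4096, true };	/* bps already charged */

	/* With separate helpers, each limit can be charged independently. */
	charge_bps(&tg, &a);
	charge_iops(&tg, &a);
	charge_bps(&tg, &b);	/* no byte charge for b */
	charge_iops(&tg, &b);

	printf("WRITE: bytes_disp=%llu io_disp=%u\n",
	       tg.bytes_disp[WRITE], tg.io_disp[WRITE]);
	return 0;
}

Splitting the helper this way lets a later patch in the series charge bytes
and IOs from different call sites once the bps and iops queues are separated,
instead of always charging both together.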

Comments

Yu Kuai April 15, 2025, 2:22 a.m. UTC | #1
On 2025/04/14 21:27, Zizhi Wo wrote:
> Split throtl_charge_bio() into throtl_charge_bps_bio() and
> throtl_charge_iops_bio() so that subsequent patches can charge bps and
> iops separately after the queue is split.
> 
> Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
> ---
>   block/blk-throttle.c | 35 ++++++++++++++++++++---------------
>   1 file changed, 20 insertions(+), 15 deletions(-)
> 
LGTM
Reviewed-by: Yu Kuai <yukuai3@huawei.com>


Patch

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0633ae0cce90..91ee1c502b41 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -736,6 +736,20 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	return jiffy_wait;
 }
 
+static void throtl_charge_bps_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	unsigned int bio_size = throtl_bio_data_size(bio);
+
+	/* Charge the bio to the group */
+	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
+		tg->bytes_disp[bio_data_dir(bio)] += bio_size;
+}
+
+static void throtl_charge_iops_bio(struct throtl_grp *tg, struct bio *bio)
+{
+	tg->io_disp[bio_data_dir(bio)]++;
+}
+
 /*
  * If previous slice expired, start a new one otherwise renew/extend existing
  * slice to make sure it is at least throtl_slice interval long since now.
@@ -808,18 +822,6 @@ static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
 	return max(bps_wait, iops_wait);
 }
 
-static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
-{
-	bool rw = bio_data_dir(bio);
-	unsigned int bio_size = throtl_bio_data_size(bio);
-
-	/* Charge the bio to the group */
-	if (!bio_flagged(bio, BIO_BPS_THROTTLED))
-		tg->bytes_disp[rw] += bio_size;
-
-	tg->io_disp[rw]++;
-}
-
 /**
  * throtl_add_bio_tg - add a bio to the specified throtl_grp
  * @bio: bio to add
@@ -906,7 +908,8 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
 	bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
 	sq->nr_queued[rw]--;
 
-	throtl_charge_bio(tg, bio);
+	throtl_charge_bps_bio(tg, bio);
+	throtl_charge_iops_bio(tg, bio);
 
 	/*
 	 * If our parent is another tg, we just need to transfer @bio to
@@ -1633,7 +1636,8 @@ bool __blk_throtl_bio(struct bio *bio)
 	while (true) {
 		if (tg_within_limit(tg, bio, rw)) {
 			/* within limits, let's charge and dispatch directly */
-			throtl_charge_bio(tg, bio);
+			throtl_charge_bps_bio(tg, bio);
+			throtl_charge_iops_bio(tg, bio);
 
 			/*
 			 * We need to trim slice even when bios are not being
@@ -1656,7 +1660,8 @@ bool __blk_throtl_bio(struct bio *bio)
 			 * control algorithm is adaptive, and extra IO bytes
 			 * will be throttled for paying the debt
 			 */
-			throtl_charge_bio(tg, bio);
+			throtl_charge_bps_bio(tg, bio);
+			throtl_charge_iops_bio(tg, bio);
 		} else {
 			/* if above limits, break to queue */
 			break;