[1/7] blk-throttle: Rename tg_may_dispatch() to tg_dispatch_time()

Message ID 20250414132731.167620-2-wozizhi@huawei.com (mailing list archive)
State New
Series blk-throttle: Split the blkthrotl queue to solve the IO delay issue

Commit Message

Zizhi Wo April 14, 2025, 1:27 p.m. UTC
tg_may_dispatch() can directly indicate whether a bio can be dispatched by
returning the time to wait, without the need for the redundant "wait"
parameter. Remove it and modify the function's return type accordingly.

Since the returned wait time alone now indicates whether a bio can be
dispatched, rename tg_may_dispatch() to tg_dispatch_time().

Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
---
 block/blk-throttle.c | 40 +++++++++++++++-------------------------
 1 file changed, 15 insertions(+), 25 deletions(-)
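
For illustration, here is a minimal sketch of the calling-convention change
this patch makes. The dispatch_now()/defer_until() helpers are hypothetical
stand-ins for the real dispatch paths, not functions from the patch:

	unsigned long wait = 0;

	/* Before: boolean verdict plus optional "wait" out-parameter. */
	if (tg_may_dispatch(tg, bio, &wait))
		dispatch_now(bio);
	else
		defer_until(jiffies + wait);

	/* After: the return value alone carries both pieces of information;
	 * zero means "dispatch now", non-zero is the jiffies to wait.
	 */
	wait = tg_dispatch_time(tg, bio);
	if (!wait)
		dispatch_now(bio);
	else
		defer_until(jiffies + wait);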

Comments

Yu Kuai April 15, 2025, 1:46 a.m. UTC | #1
On 2025/04/14 21:27, Zizhi Wo wrote:
> tg_may_dispatch() can directly indicate whether a bio can be dispatched by
> returning the time to wait, without the need for the redundant "wait"
> parameter. Remove it and modify the function's return type accordingly.
> 
> Since the returned wait time alone now indicates whether a bio can be
> dispatched, rename tg_may_dispatch() to tg_dispatch_time().
> 
> Signed-off-by: Zizhi Wo <wozizhi@huawei.com>
> ---
>   block/blk-throttle.c | 40 +++++++++++++++-------------------------
>   1 file changed, 15 insertions(+), 25 deletions(-)
> 
LGTM
Reviewed-by: Yu Kuai <yukuai3@huawei.com>

> diff --git a/block/blk-throttle.c b/block/blk-throttle.c
> index 91dab43c65ab..dc23c961c028 100644
> --- a/block/blk-throttle.c
> +++ b/block/blk-throttle.c
> @@ -743,14 +743,13 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
>   }
>   
>   /*
> - * Returns whether one can dispatch a bio or not. Also returns approx number
> - * of jiffies to wait before this bio is with-in IO rate and can be dispatched
> + * Returns approx number of jiffies to wait before this bio is with-in IO rate
> + * and can be dispatched.
>    */
> -static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
> -			    unsigned long *wait)
> +static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
>   {
>   	bool rw = bio_data_dir(bio);
> -	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
> +	unsigned long bps_wait, iops_wait, max_wait;
>   	u64 bps_limit = tg_bps_limit(tg, rw);
>   	u32 iops_limit = tg_iops_limit(tg, rw);
>   
> @@ -765,11 +764,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>   
>   	/* If tg->bps = -1, then BW is unlimited */
>   	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
> -	    tg->flags & THROTL_TG_CANCELING) {
> -		if (wait)
> -			*wait = 0;
> -		return true;
> -	}
> +	    tg->flags & THROTL_TG_CANCELING)
> +		return 0;
>   
>   	/*
>   	 * If previous slice expired, start a new one otherwise renew/extend
> @@ -789,21 +785,15 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>   
>   	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
>   	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
> -	if (bps_wait + iops_wait == 0) {
> -		if (wait)
> -			*wait = 0;
> -		return true;
> -	}
> +	if (bps_wait + iops_wait == 0)
> +		return 0;
>   
>   	max_wait = max(bps_wait, iops_wait);
>   
> -	if (wait)
> -		*wait = max_wait;
> -
>   	if (time_before(tg->slice_end[rw], jiffies + max_wait))
>   		throtl_extend_slice(tg, rw, jiffies + max_wait);
>   
> -	return false;
> +	return max_wait;
>   }
>   
>   static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
> @@ -854,16 +844,16 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
>   static void tg_update_disptime(struct throtl_grp *tg)
>   {
>   	struct throtl_service_queue *sq = &tg->service_queue;
> -	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
> +	unsigned long read_wait = -1, write_wait = -1, min_wait, disptime;
>   	struct bio *bio;
>   
>   	bio = throtl_peek_queued(&sq->queued[READ]);
>   	if (bio)
> -		tg_may_dispatch(tg, bio, &read_wait);
> +		read_wait = tg_dispatch_time(tg, bio);
>   
>   	bio = throtl_peek_queued(&sq->queued[WRITE]);
>   	if (bio)
> -		tg_may_dispatch(tg, bio, &write_wait);
> +		write_wait = tg_dispatch_time(tg, bio);
>   
>   	min_wait = min(read_wait, write_wait);
>   	disptime = jiffies + min_wait;
> @@ -941,7 +931,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg)
>   	/* Try to dispatch 75% READS and 25% WRITES */
>   
>   	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
> -	       tg_may_dispatch(tg, bio, NULL)) {
> +	       tg_dispatch_time(tg, bio) == 0) {
>   
>   		tg_dispatch_one_bio(tg, READ);
>   		nr_reads++;
> @@ -951,7 +941,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg)
>   	}
>   
>   	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
> -	       tg_may_dispatch(tg, bio, NULL)) {
> +	       tg_dispatch_time(tg, bio) == 0) {
>   
>   		tg_dispatch_one_bio(tg, WRITE);
>   		nr_writes++;
> @@ -1610,7 +1600,7 @@ static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
>   	if (tg->service_queue.nr_queued[rw])
>   		return false;
>   
> -	return tg_may_dispatch(tg, bio, NULL);
> +	return tg_dispatch_time(tg, bio) == 0;
>   }
>   
>   bool __blk_throtl_bio(struct bio *bio)
>

Patch

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 91dab43c65ab..dc23c961c028 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -743,14 +743,13 @@ static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
 }
 
 /*
- * Returns whether one can dispatch a bio or not. Also returns approx number
- * of jiffies to wait before this bio is with-in IO rate and can be dispatched
+ * Returns approx number of jiffies to wait before this bio is with-in IO rate
+ * and can be dispatched.
  */
-static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
-			    unsigned long *wait)
+static unsigned long tg_dispatch_time(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
-	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
+	unsigned long bps_wait, iops_wait, max_wait;
 	u64 bps_limit = tg_bps_limit(tg, rw);
 	u32 iops_limit = tg_iops_limit(tg, rw);
 
@@ -765,11 +764,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 
 	/* If tg->bps = -1, then BW is unlimited */
 	if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) ||
-	    tg->flags & THROTL_TG_CANCELING) {
-		if (wait)
-			*wait = 0;
-		return true;
-	}
+	    tg->flags & THROTL_TG_CANCELING)
+		return 0;
 
 	/*
 	 * If previous slice expired, start a new one otherwise renew/extend
@@ -789,21 +785,15 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 
 	bps_wait = tg_within_bps_limit(tg, bio, bps_limit);
 	iops_wait = tg_within_iops_limit(tg, bio, iops_limit);
-	if (bps_wait + iops_wait == 0) {
-		if (wait)
-			*wait = 0;
-		return true;
-	}
+	if (bps_wait + iops_wait == 0)
+		return 0;
 
 	max_wait = max(bps_wait, iops_wait);
 
-	if (wait)
-		*wait = max_wait;
-
 	if (time_before(tg->slice_end[rw], jiffies + max_wait))
 		throtl_extend_slice(tg, rw, jiffies + max_wait);
 
-	return false;
+	return max_wait;
 }
 
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
@@ -854,16 +844,16 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
 static void tg_update_disptime(struct throtl_grp *tg)
 {
 	struct throtl_service_queue *sq = &tg->service_queue;
-	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
+	unsigned long read_wait = -1, write_wait = -1, min_wait, disptime;
 	struct bio *bio;
 
 	bio = throtl_peek_queued(&sq->queued[READ]);
 	if (bio)
-		tg_may_dispatch(tg, bio, &read_wait);
+		read_wait = tg_dispatch_time(tg, bio);
 
 	bio = throtl_peek_queued(&sq->queued[WRITE]);
 	if (bio)
-		tg_may_dispatch(tg, bio, &write_wait);
+		write_wait = tg_dispatch_time(tg, bio);
 
 	min_wait = min(read_wait, write_wait);
 	disptime = jiffies + min_wait;
@@ -941,7 +931,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg)
 	/* Try to dispatch 75% READS and 25% WRITES */
 
 	while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
-	       tg_may_dispatch(tg, bio, NULL)) {
+	       tg_dispatch_time(tg, bio) == 0) {
 
 		tg_dispatch_one_bio(tg, READ);
 		nr_reads++;
@@ -951,7 +941,7 @@ static int throtl_dispatch_tg(struct throtl_grp *tg)
 	}
 
 	while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
-	       tg_may_dispatch(tg, bio, NULL)) {
+	       tg_dispatch_time(tg, bio) == 0) {
 
 		tg_dispatch_one_bio(tg, WRITE);
 		nr_writes++;
@@ -1610,7 +1600,7 @@ static bool tg_within_limit(struct throtl_grp *tg, struct bio *bio, bool rw)
 	if (tg->service_queue.nr_queued[rw])
 		return false;
 
-	return tg_may_dispatch(tg, bio, NULL);
+	return tg_dispatch_time(tg, bio) == 0;
 }
 
 bool __blk_throtl_bio(struct bio *bio)
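
A note on the tg_update_disptime() hunk above: assigning -1 to an unsigned
long stores ULONG_MAX, so a direction with no queued bio can never win the
min() comparison. A minimal sketch of the idiom as it reads after this
patch, where read_bio/write_bio stand in for the peeked bios:

	unsigned long read_wait = -1;	/* ULONG_MAX: no READ bio queued */
	unsigned long write_wait = -1;	/* ULONG_MAX: no WRITE bio queued */
	unsigned long min_wait, disptime;

	if (read_bio)
		read_wait = tg_dispatch_time(tg, read_bio);
	if (write_bio)
		write_wait = tg_dispatch_time(tg, write_bio);

	/* An empty direction contributes ULONG_MAX and is ignored by min(). */
	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;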