diff mbox

[RFC] blk-throttle: add burst allowance.

Message ID 20171114231022.42961-1-khazhy@google.com (mailing list archive)
State New, archived
Headers show

Commit Message

Khazhy Kumykov Nov. 14, 2017, 11:10 p.m. UTC
Allows configuration of additional bytes or ios before a throttle is
triggered.

This allows implementation of a bucket style rate-limit/throttle on a
block device. Previously, bursting to a device was limited to allowance
granted in a single throtl_slice (similar to a bucket with limit N and
refill rate N/slice).

Additional parameters bytes/io_burst_conf defined for tg, which define a
number of bytes/ios that must be depleted before throttling happens. A
tg that does not deplete this allowance functions as though it has no
configured limits. tgs earn additional allowance at rate defined by
bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
kicks in. If a tg is idle for a while, it will again have some burst
allowance before it gets throttled again.

slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
when all "used" burst allowance would be earned back. trim_slice still
does progress slice_start as before and decrements *_disp as before, and
tgs continue to get bytes/ios in throtl_slice intervals.

Signed-off-by: Khazhismel Kumykov <khazhy@google.com>
---
 block/Kconfig        |  11 +++
 block/blk-throttle.c | 192 +++++++++++++++++++++++++++++++++++++++++++++++----
 2 files changed, 189 insertions(+), 14 deletions(-)

Comments

Khazhy Kumykov Nov. 14, 2017, 11:22 p.m. UTC | #1
(Botched the To: line, sorry)
Shaohua Li Nov. 16, 2017, 4:50 p.m. UTC | #2
On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
> Allows configuration additional bytes or ios before a throttle is
> triggered.
> 
> This allows implementation of a bucket style rate-limit/throttle on a
> block device. Previously, bursting to a device was limited to allowance
> granted in a single throtl_slice (similar to a bucket with limit N and
> refill rate N/slice).
> 
> Additional parameters bytes/io_burst_conf defined for tg, which define a
> number of bytes/ios that must be depleted before throttling happens. A
> tg that does not deplete this allowance functions as though it has no
> configured limits. tgs earn additional allowance at rate defined by
> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
> kicks in. If a tg is idle for a while, it will again have some burst
> allowance before it gets throttled again.
> 
> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
> when all "used" burst allowance would be earned back. trim_slice still
> does progress slice_start as before and decrements *_disp as before, and
> tgs continue to get bytes/ios in throtl_slice intervals.

Can you describe why we need this? It would be great if you can describe the
usage model and an example. Does this work for io.low/io.max or both?

Thanks,
Shaohua
 
> Signed-off-by: Khazhismel Kumykov <khazhy@google.com>
> ---
>  block/Kconfig        |  11 +++
>  block/blk-throttle.c | 192 +++++++++++++++++++++++++++++++++++++++++++++++----
>  2 files changed, 189 insertions(+), 14 deletions(-)
> 
> diff --git a/block/Kconfig b/block/Kconfig
> index 28ec55752b68..fbd05b419f93 100644
> --- a/block/Kconfig
> +++ b/block/Kconfig
> @@ -128,6 +128,17 @@ config BLK_DEV_THROTTLING_LOW
>  
>  	Note, this is an experimental interface and could be changed someday.
>  
> +config BLK_DEV_THROTTLING_BURST
> +    bool "Block throttling .burst allowance interface"
> +    depends on BLK_DEV_THROTTLING
> +    default n
> +    ---help---
> +    Add .burst allowance for block throttling. Burst allowance allows for
> +    additional unthrottled usage, while still limiting speed for sustained
> +    usage.
> +
> +    If in doubt, say N.
> +
>  config BLK_CMDLINE_PARSER
>  	bool "Block device command line partition parser"
>  	default n
> diff --git a/block/blk-throttle.c b/block/blk-throttle.c
> index 96ad32623427..27c084312772 100644
> --- a/block/blk-throttle.c
> +++ b/block/blk-throttle.c
> @@ -157,6 +157,11 @@ struct throtl_grp {
>  	/* Number of bio's dispatched in current slice */
>  	unsigned int io_disp[2];
>  
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +	uint64_t bytes_burst_conf[2];
> +	unsigned int io_burst_conf[2];
> +#endif
> +
>  	unsigned long last_low_overflow_time[2];
>  
>  	uint64_t last_bytes_disp[2];
> @@ -507,6 +512,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
>  	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
>  	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
>  	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +	tg->bytes_burst_conf[READ] = 0;
> +	tg->bytes_burst_conf[WRITE] = 0;
> +	tg->io_burst_conf[READ] = 0;
> +	tg->io_burst_conf[WRITE] = 0;
> +#endif
>  	/* LIMIT_LOW will have default value 0 */
>  
>  	tg->latency_target = DFL_LATENCY_TARGET;
> @@ -800,6 +811,26 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
>  		   tg->slice_end[rw], jiffies);
>  }
>  
> +/*
> + * When current slice should end.
> + *
> + * With CONFIG_BLK_DEV_THROTTLING_BURST, we will wait longer than min_wait
> + * for slice to recover used burst allowance. (*_disp -> 0). Setting slice_end
> + * before this would result in tg receiving additional burst allowance.
> + */
> +static inline unsigned long throtl_slice_wait(struct throtl_grp *tg, bool rw,
> +						unsigned long min_wait)
> +{
> +	unsigned long bytes_wait = 0, io_wait = 0;
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +	if (tg->bytes_burst_conf[rw])
> +		bytes_wait = (tg->bytes_disp[rw] * HZ) / tg_bps_limit(tg, rw);
> +	if (tg->io_burst_conf[rw])
> +		io_wait = (tg->io_disp[rw] * HZ) / tg_iops_limit(tg, rw);
> +#endif
> +	return max(min_wait, max(bytes_wait, io_wait));
> +}
> +
>  static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
>  					unsigned long jiffy_end)
>  {
> @@ -849,7 +880,8 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
>  	 * is bad because it does not allow new slice to start.
>  	 */
>  
> -	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
> +	throtl_set_slice_end(tg, rw,
> +		jiffies + throtl_slice_wait(tg, rw, tg->td->throtl_slice));
>  
>  	time_elapsed = jiffies - tg->slice_start[rw];
>  
> @@ -889,7 +921,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
>  				  unsigned long *wait)
>  {
>  	bool rw = bio_data_dir(bio);
> -	unsigned int io_allowed;
> +	unsigned int io_allowed, io_disp;
>  	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
>  	u64 tmp;
>  
> @@ -908,6 +940,17 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
>  	 * have been trimmed.
>  	 */
>  
> +	io_disp = tg->io_disp[rw];
> +
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +	if (tg->io_disp[rw] < tg->io_burst_conf[rw]) {
> +		if (wait)
> +			*wait = 0;
> +		return true;
> +	}
> +	io_disp -= tg->io_burst_conf[rw];
> +#endif
> +
>  	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
>  	do_div(tmp, HZ);
>  
> @@ -916,14 +959,14 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
>  	else
>  		io_allowed = tmp;
>  
> -	if (tg->io_disp[rw] + 1 <= io_allowed) {
> +	if (io_disp + 1 <= io_allowed) {
>  		if (wait)
>  			*wait = 0;
>  		return true;
>  	}
>  
>  	/* Calc approx time to dispatch */
> -	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
> +	jiffy_wait = ((io_disp + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
>  
>  	if (jiffy_wait > jiffy_elapsed)
>  		jiffy_wait = jiffy_wait - jiffy_elapsed;
> @@ -939,7 +982,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
>  				 unsigned long *wait)
>  {
>  	bool rw = bio_data_dir(bio);
> -	u64 bytes_allowed, extra_bytes, tmp;
> +	u64 bytes_allowed, extra_bytes, bytes_disp, tmp;
>  	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
>  	unsigned int bio_size = throtl_bio_data_size(bio);
>  
> @@ -951,18 +994,28 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
>  
>  	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
>  
> +	bytes_disp = tg->bytes_disp[rw];
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +	if (tg->bytes_disp[rw] < tg->bytes_burst_conf[rw]) {
> +		if (wait)
> +			*wait = 0;
> +		return true;
> +	}
> +	bytes_disp -= tg->bytes_burst_conf[rw];
> +#endif
> +
>  	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
>  	do_div(tmp, HZ);
>  	bytes_allowed = tmp;
>  
> -	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
> +	if (bytes_disp + bio_size <= bytes_allowed) {
>  		if (wait)
>  			*wait = 0;
>  		return true;
>  	}
>  
>  	/* Calc approx time to dispatch */
> -	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
> +	extra_bytes = bytes_disp + bio_size - bytes_allowed;
>  	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
>  
>  	if (!jiffy_wait)
> @@ -986,7 +1039,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>  			    unsigned long *wait)
>  {
>  	bool rw = bio_data_dir(bio);
> -	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
> +	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0, disp_time;
>  
>  	/*
>   	 * Currently whole state machine of group depends on first bio
> @@ -1015,10 +1068,10 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>  	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
>  		throtl_start_new_slice(tg, rw);
>  	else {
> -		if (time_before(tg->slice_end[rw],
> -		    jiffies + tg->td->throtl_slice))
> -			throtl_extend_slice(tg, rw,
> -				jiffies + tg->td->throtl_slice);
> +		disp_time = jiffies + throtl_slice_wait(
> +				tg, rw, tg->td->throtl_slice);
> +		if (time_before(tg->slice_end[rw], disp_time))
> +			throtl_extend_slice(tg, rw, disp_time);
>  	}
>  
>  	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
> @@ -1033,8 +1086,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>  	if (wait)
>  		*wait = max_wait;
>  
> -	if (time_before(tg->slice_end[rw], jiffies + max_wait))
> -		throtl_extend_slice(tg, rw, jiffies + max_wait);
> +	disp_time = jiffies + throtl_slice_wait(tg, rw, max_wait);
> +	if (time_before(tg->slice_end[rw], disp_time))
> +		throtl_extend_slice(tg, rw, disp_time);
>  
>  	return 0;
>  }
> @@ -1705,6 +1759,108 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
>  	return ret ?: nbytes;
>  }
>  
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +static u64 tg_prfill_burst(struct seq_file *sf, struct blkg_policy_data *pd,
> +			   int data)
> +{
> +	struct throtl_grp *tg = pd_to_tg(pd);
> +	const char *dname = blkg_dev_name(pd->blkg);
> +	char bufs[4][21];
> +
> +	if (!dname)
> +		return 0;
> +
> +	if (tg->bytes_burst_conf[READ] == 0 &&
> +	    tg->bytes_burst_conf[WRITE] == 0 &&
> +	    tg->io_burst_conf[READ] == 0 &&
> +	    tg->io_burst_conf[WRITE] == 0)
> +		return 0;
> +
> +	snprintf(bufs[0], sizeof(bufs[0]), "%llu",
> +		tg->bytes_burst_conf[READ]);
> +	snprintf(bufs[1], sizeof(bufs[1]), "%llu",
> +		tg->bytes_burst_conf[WRITE]);
> +	snprintf(bufs[2], sizeof(bufs[2]), "%u",
> +		tg->io_burst_conf[READ]);
> +	snprintf(bufs[3], sizeof(bufs[3]), "%u",
> +		tg->io_burst_conf[WRITE]);
> +
> +	seq_printf(sf, "%s brbyte=%s bwbyte=%s brio=%s bwio=%s\n",
> +		   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
> +	return 0;
> +}
> +
> +static int tg_print_burst(struct seq_file *sf, void *v)
> +{
> +	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_burst,
> +			  &blkcg_policy_throtl, 0, false);
> +	return 0;
> +}
> +
> +static ssize_t tg_set_burst(struct kernfs_open_file *of,
> +			  char *buf, size_t nbytes, loff_t off)
> +{
> +	struct blkcg *blkcg = css_to_blkcg(of_css(of));
> +	struct blkg_conf_ctx ctx;
> +	struct throtl_grp *tg;
> +	u64 v[4];
> +	int ret;
> +
> +	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
> +	if (ret)
> +		return ret;
> +
> +	tg = blkg_to_tg(ctx.blkg);
> +
> +	v[0] = tg->bytes_burst_conf[READ];
> +	v[1] = tg->bytes_burst_conf[WRITE];
> +	v[2] = tg->io_burst_conf[READ];
> +	v[3] = tg->io_burst_conf[WRITE];
> +
> +	while (true) {
> +		char tok[28];	/* bwbyte=18446744073709551616 */
> +		char *p;
> +		u64 val = U64_MAX;
> +		int len;
> +
> +		if (sscanf(ctx.body, "%27s%n", tok, &len) != 1)
> +			break;
> +		if (tok[0] == '\0')
> +			break;
> +		ctx.body += len;
> +
> +		ret = -EINVAL;
> +		p = tok;
> +		strsep(&p, "=");
> +		if (!p || (kstrtoull(p, 0, &val) != 0 && strcmp(p, "max")))
> +			goto out_finish;
> +
> +		ret = -EINVAL;
> +		if (!strcmp(tok, "brbyte"))
> +			v[0] = val;
> +		else if (!strcmp(tok, "bwbyte"))
> +			v[1] = val;
> +		else if (!strcmp(tok, "brio"))
> +			v[2] = min_t(u64, val, UINT_MAX);
> +		else if (!strcmp(tok, "bwio"))
> +			v[3] = min_t(u64, val, UINT_MAX);
> +		else
> +			goto out_finish;
> +	}
> +
> +	tg->bytes_burst_conf[READ] = v[0];
> +	tg->bytes_burst_conf[WRITE] = v[1];
> +	tg->io_burst_conf[READ] = v[2];
> +	tg->io_burst_conf[WRITE] = v[3];
> +
> +	tg_conf_updated(tg, false);
> +	ret = 0;
> +out_finish:
> +	blkg_conf_finish(&ctx);
> +	return ret ?: nbytes;
> +}
> +#endif
> +
>  static struct cftype throtl_files[] = {
>  #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
>  	{
> @@ -1714,6 +1870,14 @@ static struct cftype throtl_files[] = {
>  		.write = tg_set_limit,
>  		.private = LIMIT_LOW,
>  	},
> +#endif
> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
> +	{
> +		.name = "burst",
> +		.flags = CFTYPE_NOT_ON_ROOT,
> +		.seq_show = tg_print_burst,
> +		.write = tg_set_burst,
> +	},
>  #endif
>  	{
>  		.name = "max",
> -- 
> 2.15.0.448.gf294e3d99a-goog
>
Khazhy Kumykov Nov. 17, 2017, 4:25 a.m. UTC | #3
On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
> On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
>> Allows configuration additional bytes or ios before a throttle is
>> triggered.
>>
>> This allows implementation of a bucket style rate-limit/throttle on a
>> block device. Previously, bursting to a device was limited to allowance
>> granted in a single throtl_slice (similar to a bucket with limit N and
>> refill rate N/slice).
>>
>> Additional parameters bytes/io_burst_conf defined for tg, which define a
>> number of bytes/ios that must be depleted before throttling happens. A
>> tg that does not deplete this allowance functions as though it has no
>> configured limits. tgs earn additional allowance at rate defined by
>> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
>> kicks in. If a tg is idle for a while, it will again have some burst
>> allowance before it gets throttled again.
>>
>> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
>> when all "used" burst allowance would be earned back. trim_slice still
>> does progress slice_start as before and decrements *_disp as before, and
>> tgs continue to get bytes/ios in throtl_slice intervals.
>
> Can you describe why we need this? It would be great if you can describe the
> usage model and an example. Does this work for io.low/io.max or both?
>
> Thanks,
> Shaohua
>

The use case that brought this up was configuring limits for a remote
shared device. Bursting beyond io.max is desired, but only up to a
point before the limit kicks in; afterwards, with sustained usage,
throughput is capped. (This proactively avoids remote-side limits.) In
that case one would configure io.max + io.burst in a root container,
and configure low/other limits on descendants sharing the resource on
the same node.

With this patch, so long as tg has not dispatched more than the burst,
no limit is applied at all by that tg, including limit imposed by
io.low in tg_iops_limit, etc.

Khazhy

>> Signed-off-by: Khazhismel Kumykov <khazhy@google.com>
>> ---
>>  block/Kconfig        |  11 +++
>>  block/blk-throttle.c | 192 +++++++++++++++++++++++++++++++++++++++++++++++----
>>  2 files changed, 189 insertions(+), 14 deletions(-)
>>
>> diff --git a/block/Kconfig b/block/Kconfig
>> index 28ec55752b68..fbd05b419f93 100644
>> --- a/block/Kconfig
>> +++ b/block/Kconfig
>> @@ -128,6 +128,17 @@ config BLK_DEV_THROTTLING_LOW
>>
>>       Note, this is an experimental interface and could be changed someday.
>>
>> +config BLK_DEV_THROTTLING_BURST
>> +    bool "Block throttling .burst allowance interface"
>> +    depends on BLK_DEV_THROTTLING
>> +    default n
>> +    ---help---
>> +    Add .burst allowance for block throttling. Burst allowance allows for
>> +    additional unthrottled usage, while still limiting speed for sustained
>> +    usage.
>> +
>> +    If in doubt, say N.
>> +
>>  config BLK_CMDLINE_PARSER
>>       bool "Block device command line partition parser"
>>       default n
>> diff --git a/block/blk-throttle.c b/block/blk-throttle.c
>> index 96ad32623427..27c084312772 100644
>> --- a/block/blk-throttle.c
>> +++ b/block/blk-throttle.c
>> @@ -157,6 +157,11 @@ struct throtl_grp {
>>       /* Number of bio's dispatched in current slice */
>>       unsigned int io_disp[2];
>>
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +     uint64_t bytes_burst_conf[2];
>> +     unsigned int io_burst_conf[2];
>> +#endif
>> +
>>       unsigned long last_low_overflow_time[2];
>>
>>       uint64_t last_bytes_disp[2];
>> @@ -507,6 +512,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
>>       tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
>>       tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
>>       tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +     tg->bytes_burst_conf[READ] = 0;
>> +     tg->bytes_burst_conf[WRITE] = 0;
>> +     tg->io_burst_conf[READ] = 0;
>> +     tg->io_burst_conf[WRITE] = 0;
>> +#endif
>>       /* LIMIT_LOW will have default value 0 */
>>
>>       tg->latency_target = DFL_LATENCY_TARGET;
>> @@ -800,6 +811,26 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
>>                  tg->slice_end[rw], jiffies);
>>  }
>>
>> +/*
>> + * When current slice should end.
>> + *
>> + * With CONFIG_BLK_DEV_THROTTLING_BURST, we will wait longer than min_wait
>> + * for slice to recover used burst allowance. (*_disp -> 0). Setting slice_end
>> + * before this would result in tg receiving additional burst allowance.
>> + */
>> +static inline unsigned long throtl_slice_wait(struct throtl_grp *tg, bool rw,
>> +                                             unsigned long min_wait)
>> +{
>> +     unsigned long bytes_wait = 0, io_wait = 0;
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +     if (tg->bytes_burst_conf[rw])
>> +             bytes_wait = (tg->bytes_disp[rw] * HZ) / tg_bps_limit(tg, rw);
>> +     if (tg->io_burst_conf[rw])
>> +             io_wait = (tg->io_disp[rw] * HZ) / tg_iops_limit(tg, rw);
>> +#endif
>> +     return max(min_wait, max(bytes_wait, io_wait));
>> +}
>> +
>>  static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
>>                                       unsigned long jiffy_end)
>>  {
>> @@ -849,7 +880,8 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
>>        * is bad because it does not allow new slice to start.
>>        */
>>
>> -     throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
>> +     throtl_set_slice_end(tg, rw,
>> +             jiffies + throtl_slice_wait(tg, rw, tg->td->throtl_slice));
>>
>>       time_elapsed = jiffies - tg->slice_start[rw];
>>
>> @@ -889,7 +921,7 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
>>                                 unsigned long *wait)
>>  {
>>       bool rw = bio_data_dir(bio);
>> -     unsigned int io_allowed;
>> +     unsigned int io_allowed, io_disp;
>>       unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
>>       u64 tmp;
>>
>> @@ -908,6 +940,17 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
>>        * have been trimmed.
>>        */
>>
>> +     io_disp = tg->io_disp[rw];
>> +
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +     if (tg->io_disp[rw] < tg->io_burst_conf[rw]) {
>> +             if (wait)
>> +                     *wait = 0;
>> +             return true;
>> +     }
>> +     io_disp -= tg->io_burst_conf[rw];
>> +#endif
>> +
>>       tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
>>       do_div(tmp, HZ);
>>
>> @@ -916,14 +959,14 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
>>       else
>>               io_allowed = tmp;
>>
>> -     if (tg->io_disp[rw] + 1 <= io_allowed) {
>> +     if (io_disp + 1 <= io_allowed) {
>>               if (wait)
>>                       *wait = 0;
>>               return true;
>>       }
>>
>>       /* Calc approx time to dispatch */
>> -     jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
>> +     jiffy_wait = ((io_disp + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
>>
>>       if (jiffy_wait > jiffy_elapsed)
>>               jiffy_wait = jiffy_wait - jiffy_elapsed;
>> @@ -939,7 +982,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
>>                                unsigned long *wait)
>>  {
>>       bool rw = bio_data_dir(bio);
>> -     u64 bytes_allowed, extra_bytes, tmp;
>> +     u64 bytes_allowed, extra_bytes, bytes_disp, tmp;
>>       unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
>>       unsigned int bio_size = throtl_bio_data_size(bio);
>>
>> @@ -951,18 +994,28 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
>>
>>       jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
>>
>> +     bytes_disp = tg->bytes_disp[rw];
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +     if (tg->bytes_disp[rw] < tg->bytes_burst_conf[rw]) {
>> +             if (wait)
>> +                     *wait = 0;
>> +             return true;
>> +     }
>> +     bytes_disp -= tg->bytes_burst_conf[rw];
>> +#endif
>> +
>>       tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
>>       do_div(tmp, HZ);
>>       bytes_allowed = tmp;
>>
>> -     if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
>> +     if (bytes_disp + bio_size <= bytes_allowed) {
>>               if (wait)
>>                       *wait = 0;
>>               return true;
>>       }
>>
>>       /* Calc approx time to dispatch */
>> -     extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
>> +     extra_bytes = bytes_disp + bio_size - bytes_allowed;
>>       jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
>>
>>       if (!jiffy_wait)
>> @@ -986,7 +1039,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>>                           unsigned long *wait)
>>  {
>>       bool rw = bio_data_dir(bio);
>> -     unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
>> +     unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0, disp_time;
>>
>>       /*
>>        * Currently whole state machine of group depends on first bio
>> @@ -1015,10 +1068,10 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>>       if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
>>               throtl_start_new_slice(tg, rw);
>>       else {
>> -             if (time_before(tg->slice_end[rw],
>> -                 jiffies + tg->td->throtl_slice))
>> -                     throtl_extend_slice(tg, rw,
>> -                             jiffies + tg->td->throtl_slice);
>> +             disp_time = jiffies + throtl_slice_wait(
>> +                             tg, rw, tg->td->throtl_slice);
>> +             if (time_before(tg->slice_end[rw], disp_time))
>> +                     throtl_extend_slice(tg, rw, disp_time);
>>       }
>>
>>       if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
>> @@ -1033,8 +1086,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
>>       if (wait)
>>               *wait = max_wait;
>>
>> -     if (time_before(tg->slice_end[rw], jiffies + max_wait))
>> -             throtl_extend_slice(tg, rw, jiffies + max_wait);
>> +     disp_time = jiffies + throtl_slice_wait(tg, rw, max_wait);
>> +     if (time_before(tg->slice_end[rw], disp_time))
>> +             throtl_extend_slice(tg, rw, disp_time);
>>
>>       return 0;
>>  }
>> @@ -1705,6 +1759,108 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
>>       return ret ?: nbytes;
>>  }
>>
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +static u64 tg_prfill_burst(struct seq_file *sf, struct blkg_policy_data *pd,
>> +                        int data)
>> +{
>> +     struct throtl_grp *tg = pd_to_tg(pd);
>> +     const char *dname = blkg_dev_name(pd->blkg);
>> +     char bufs[4][21];
>> +
>> +     if (!dname)
>> +             return 0;
>> +
>> +     if (tg->bytes_burst_conf[READ] == 0 &&
>> +         tg->bytes_burst_conf[WRITE] == 0 &&
>> +         tg->io_burst_conf[READ] == 0 &&
>> +         tg->io_burst_conf[WRITE] == 0)
>> +             return 0;
>> +
>> +     snprintf(bufs[0], sizeof(bufs[0]), "%llu",
>> +             tg->bytes_burst_conf[READ]);
>> +     snprintf(bufs[1], sizeof(bufs[1]), "%llu",
>> +             tg->bytes_burst_conf[WRITE]);
>> +     snprintf(bufs[2], sizeof(bufs[2]), "%u",
>> +             tg->io_burst_conf[READ]);
>> +     snprintf(bufs[3], sizeof(bufs[3]), "%u",
>> +             tg->io_burst_conf[WRITE]);
>> +
>> +     seq_printf(sf, "%s brbyte=%s bwbyte=%s brio=%s bwio=%s\n",
>> +                dname, bufs[0], bufs[1], bufs[2], bufs[3]);
>> +     return 0;
>> +}
>> +
>> +static int tg_print_burst(struct seq_file *sf, void *v)
>> +{
>> +     blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_burst,
>> +                       &blkcg_policy_throtl, 0, false);
>> +     return 0;
>> +}
>> +
>> +static ssize_t tg_set_burst(struct kernfs_open_file *of,
>> +                       char *buf, size_t nbytes, loff_t off)
>> +{
>> +     struct blkcg *blkcg = css_to_blkcg(of_css(of));
>> +     struct blkg_conf_ctx ctx;
>> +     struct throtl_grp *tg;
>> +     u64 v[4];
>> +     int ret;
>> +
>> +     ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
>> +     if (ret)
>> +             return ret;
>> +
>> +     tg = blkg_to_tg(ctx.blkg);
>> +
>> +     v[0] = tg->bytes_burst_conf[READ];
>> +     v[1] = tg->bytes_burst_conf[WRITE];
>> +     v[2] = tg->io_burst_conf[READ];
>> +     v[3] = tg->io_burst_conf[WRITE];
>> +
>> +     while (true) {
>> +             char tok[28];   /* bwbyte=18446744073709551616 */
>> +             char *p;
>> +             u64 val = U64_MAX;
>> +             int len;
>> +
>> +             if (sscanf(ctx.body, "%27s%n", tok, &len) != 1)
>> +                     break;
>> +             if (tok[0] == '\0')
>> +                     break;
>> +             ctx.body += len;
>> +
>> +             ret = -EINVAL;
>> +             p = tok;
>> +             strsep(&p, "=");
>> +             if (!p || (kstrtoull(p, 0, &val) != 0 && strcmp(p, "max")))
>> +                     goto out_finish;
>> +
>> +             ret = -EINVAL;
>> +             if (!strcmp(tok, "brbyte"))
>> +                     v[0] = val;
>> +             else if (!strcmp(tok, "bwbyte"))
>> +                     v[1] = val;
>> +             else if (!strcmp(tok, "brio"))
>> +                     v[2] = min_t(u64, val, UINT_MAX);
>> +             else if (!strcmp(tok, "bwio"))
>> +                     v[3] = min_t(u64, val, UINT_MAX);
>> +             else
>> +                     goto out_finish;
>> +     }
>> +
>> +     tg->bytes_burst_conf[READ] = v[0];
>> +     tg->bytes_burst_conf[WRITE] = v[1];
>> +     tg->io_burst_conf[READ] = v[2];
>> +     tg->io_burst_conf[WRITE] = v[3];
>> +
>> +     tg_conf_updated(tg, false);
>> +     ret = 0;
>> +out_finish:
>> +     blkg_conf_finish(&ctx);
>> +     return ret ?: nbytes;
>> +}
>> +#endif
>> +
>>  static struct cftype throtl_files[] = {
>>  #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
>>       {
>> @@ -1714,6 +1870,14 @@ static struct cftype throtl_files[] = {
>>               .write = tg_set_limit,
>>               .private = LIMIT_LOW,
>>       },
>> +#endif
>> +#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
>> +     {
>> +             .name = "burst",
>> +             .flags = CFTYPE_NOT_ON_ROOT,
>> +             .seq_show = tg_print_burst,
>> +             .write = tg_set_burst,
>> +     },
>>  #endif
>>       {
>>               .name = "max",
>> --
>> 2.15.0.448.gf294e3d99a-goog
>>
>
>
Shaohua Li Nov. 17, 2017, 7:26 p.m. UTC | #4
On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
> >> Allows configuration additional bytes or ios before a throttle is
> >> triggered.
> >>
> >> This allows implementation of a bucket style rate-limit/throttle on a
> >> block device. Previously, bursting to a device was limited to allowance
> >> granted in a single throtl_slice (similar to a bucket with limit N and
> >> refill rate N/slice).
> >>
> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
> >> number of bytes/ios that must be depleted before throttling happens. A
> >> tg that does not deplete this allowance functions as though it has no
> >> configured limits. tgs earn additional allowance at rate defined by
> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
> >> kicks in. If a tg is idle for a while, it will again have some burst
> >> allowance before it gets throttled again.
> >>
> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
> >> when all "used" burst allowance would be earned back. trim_slice still
> >> does progress slice_start as before and decrements *_disp as before, and
> >> tgs continue to get bytes/ios in throtl_slice intervals.
> >
> > Can you describe why we need this? It would be great if you can describe the
> > usage model and an example. Does this work for io.low/io.max or both?
> >
> > Thanks,
> > Shaohua
> >
> 
> Use case that brought this up was configuring limits for a remote
> shared device. Bursting beyond io.max is desired but only for so much
> before the limit kicks in, afterwards with sustained usage throughput
> is capped. (This proactively avoids remote-side limits). In that case
> one would configure in a root container io.max + io.burst, and
> configure low/other limits on descendants sharing the resource on the
> same node.
> 
> With this patch, so long as tg has not dispatched more than the burst,
> no limit is applied at all by that tg, including limit imposed by
> io.low in tg_iops_limit, etc.

I'd appreciate if you can give more details about the 'why'. 'configuring
limits for a remote shared device' doesn't justify the change.
Khazhy Kumykov Nov. 21, 2017, 4:36 a.m. UTC | #5
On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
>> >> Allows configuration additional bytes or ios before a throttle is
>> >> triggered.
>> >>
>> >> This allows implementation of a bucket style rate-limit/throttle on a
>> >> block device. Previously, bursting to a device was limited to allowance
>> >> granted in a single throtl_slice (similar to a bucket with limit N and
>> >> refill rate N/slice).
>> >>
>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
>> >> number of bytes/ios that must be depleted before throttling happens. A
>> >> tg that does not deplete this allowance functions as though it has no
>> >> configured limits. tgs earn additional allowance at rate defined by
>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
>> >> kicks in. If a tg is idle for a while, it will again have some burst
>> >> allowance before it gets throttled again.
>> >>
>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
>> >> when all "used" burst allowance would be earned back. trim_slice still
>> >> does progress slice_start as before and decrements *_disp as before, and
>> >> tgs continue to get bytes/ios in throtl_slice intervals.
>> >
>> > Can you describe why we need this? It would be great if you can describe the
>> > usage model and an example. Does this work for io.low/io.max or both?
>> >
>> > Thanks,
>> > Shaohua
>> >
>>
>> Use case that brought this up was configuring limits for a remote
>> shared device. Bursting beyond io.max is desired but only for so much
>> before the limit kicks in, afterwards with sustained usage throughput
>> is capped. (This proactively avoids remote-side limits). In that case
>> one would configure in a root container io.max + io.burst, and
>> configure low/other limits on descendants sharing the resource on the
>> same node.
>>
>> With this patch, so long as tg has not dispatched more than the burst,
>> no limit is applied at all by that tg, including limit imposed by
>> io.low in tg_iops_limit, etc.
>
> I'd appreciate if you can give more details about the 'why'. 'configuring
> limits for a remote shared device' doesn't justify the change.

This is to configure a bursty workload (and associated device) with
known/allowed expected burst size, but to not allow full utilization
of the device for extended periods of time for QoS. During idle or low
use periods the burst allowance accrues, and then tasks can burst well
beyond the configured throttle up to the limit, afterwards is
throttled. A constant throttle speed isn't sufficient for this as you
can only burst 1 slice worth, but a limit of sorts is desirable for
preventing over utilization of the shared device. This type of limit
is also slightly different than what i understand io.low does in local
cases in that tg is only high priority/unthrottled if it is bursty,
and is limited with constant usage

Khazhy
Khazhy Kumykov Dec. 7, 2017, 12:41 a.m. UTC | #6
On Mon, Nov 20, 2017 at 8:36 PM, Khazhismel Kumykov <khazhy@google.com> wrote:
> On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
>> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
>>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
>>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
>>> >> Allows configuration additional bytes or ios before a throttle is
>>> >> triggered.
>>> >>
>>> >> This allows implementation of a bucket style rate-limit/throttle on a
>>> >> block device. Previously, bursting to a device was limited to allowance
>>> >> granted in a single throtl_slice (similar to a bucket with limit N and
>>> >> refill rate N/slice).
>>> >>
>>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
>>> >> number of bytes/ios that must be depleted before throttling happens. A
>>> >> tg that does not deplete this allowance functions as though it has no
>>> >> configured limits. tgs earn additional allowance at rate defined by
>>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
>>> >> kicks in. If a tg is idle for a while, it will again have some burst
>>> >> allowance before it gets throttled again.
>>> >>
>>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
>>> >> when all "used" burst allowance would be earned back. trim_slice still
>>> >> does progress slice_start as before and decrements *_disp as before, and
>>> >> tgs continue to get bytes/ios in throtl_slice intervals.
>>> >
>>> > Can you describe why we need this? It would be great if you can describe the
>>> > usage model and an example. Does this work for io.low/io.max or both?
>>> >
>>> > Thanks,
>>> > Shaohua
>>> >
>>>
>>> Use case that brought this up was configuring limits for a remote
>>> shared device. Bursting beyond io.max is desired but only for so much
>>> before the limit kicks in, afterwards with sustained usage throughput
>>> is capped. (This proactively avoids remote-side limits). In that case
>>> one would configure in a root container io.max + io.burst, and
>>> configure low/other limits on descendants sharing the resource on the
>>> same node.
>>>
>>> With this patch, so long as tg has not dispatched more than the burst,
>>> no limit is applied at all by that tg, including limit imposed by
>>> io.low in tg_iops_limit, etc.
>>
>> I'd appreciate if you can give more details about the 'why'. 'configuring
>> limits for a remote shared device' doesn't justify the change.
>
> This is to configure a bursty workload (and associated device) with
> known/allowed expected burst size, but to not allow full utilization
> of the device for extended periods of time for QoS. During idle or low
> use periods the burst allowance accrues, and then tasks can burst well
> beyond the configured throttle up to the limit, afterwards is
> throttled. A constant throttle speed isn't sufficient for this as you
> can only burst 1 slice worth, but a limit of sorts is desirable for
> preventing over utilization of the shared device. This type of limit
> is also slightly different than what i understand io.low does in local
> cases in that tg is only high priority/unthrottled if it is bursty,
> and is limited with constant usage
>
> Khazhy
Ping this time without html (oops)
Vivek Goyal Dec. 7, 2017, 7:43 p.m. UTC | #7
On Thu, Nov 16, 2017 at 08:50:33AM -0800, Shaohua Li wrote:

[..]
> Can you describe why we need this? It would be great if you can describe the
> usage model and an example. Does this work for io.low/io.max or both?

Hi Shaohua,

Is there any documentation for "io.low" somewhere now. Should we update
cgroup-v2.txt? Just reading through BLK_DEV_THROTTLING_LOW description
does not give me enough idea to know what it is.

Thanks
Vivek
Khazhy Kumykov Dec. 18, 2017, 6:16 p.m. UTC | #8
On Mon, Nov 20, 2017 at 8:36 PM, Khazhismel Kumykov <khazhy@google.com> wrote:
> On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
>> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
>>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
>>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
>>> >> Allows configuration additional bytes or ios before a throttle is
>>> >> triggered.
>>> >>
>>> >> This allows implementation of a bucket style rate-limit/throttle on a
>>> >> block device. Previously, bursting to a device was limited to allowance
>>> >> granted in a single throtl_slice (similar to a bucket with limit N and
>>> >> refill rate N/slice).
>>> >>
>>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
>>> >> number of bytes/ios that must be depleted before throttling happens. A
>>> >> tg that does not deplete this allowance functions as though it has no
>>> >> configured limits. tgs earn additional allowance at rate defined by
>>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
>>> >> kicks in. If a tg is idle for a while, it will again have some burst
>>> >> allowance before it gets throttled again.
>>> >>
>>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
>>> >> when all "used" burst allowance would be earned back. trim_slice still
>>> >> does progress slice_start as before and decrements *_disp as before, and
>>> >> tgs continue to get bytes/ios in throtl_slice intervals.
>>> >
>>> > Can you describe why we need this? It would be great if you can describe the
>>> > usage model and an example. Does this work for io.low/io.max or both?
>>> >
>>> > Thanks,
>>> > Shaohua
>>> >
>>>
>>> Use case that brought this up was configuring limits for a remote
>>> shared device. Bursting beyond io.max is desired but only for so much
>>> before the limit kicks in, afterwards with sustained usage throughput
>>> is capped. (This proactively avoids remote-side limits). In that case
>>> one would configure in a root container io.max + io.burst, and
>>> configure low/other limits on descendants sharing the resource on the
>>> same node.
>>>
>>> With this patch, so long as tg has not dispatched more than the burst,
>>> no limit is applied at all by that tg, including limit imposed by
>>> io.low in tg_iops_limit, etc.
>>
>> I'd appreciate if you can give more details about the 'why'. 'configuring
>> limits for a remote shared device' doesn't justify the change.
>
> This is to configure a bursty workload (and associated device) with
> known/allowed expected burst size, but to not allow full utilization
> of the device for extended periods of time for QoS. During idle or low
> use periods the burst allowance accrues, and then tasks can burst well
> beyond the configured throttle up to the limit, afterwards is
> throttled. A constant throttle speed isn't sufficient for this as you
> can only burst 1 slice worth, but a limit of sorts is desirable for
> preventing over utilization of the shared device. This type of limit
> is also slightly different than what i understand io.low does in local
> cases in that tg is only high priority/unthrottled if it is bursty,
> and is limited with constant usage
>
> Khazhy

Hi Shaohua,

Does this clarify the reason for this patch? Is this (or something
similar) a good fit for inclusion in blk-throttle?

Thanks,
Khazhy
Vivek Goyal Dec. 18, 2017, 6:29 p.m. UTC | #9
On Mon, Dec 18, 2017 at 10:16:02AM -0800, Khazhismel Kumykov wrote:
> On Mon, Nov 20, 2017 at 8:36 PM, Khazhismel Kumykov <khazhy@google.com> wrote:
> > On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
> >> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
> >>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
> >>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
> >>> >> Allows configuration additional bytes or ios before a throttle is
> >>> >> triggered.
> >>> >>
> >>> >> This allows implementation of a bucket style rate-limit/throttle on a
> >>> >> block device. Previously, bursting to a device was limited to allowance
> >>> >> granted in a single throtl_slice (similar to a bucket with limit N and
> >>> >> refill rate N/slice).
> >>> >>
> >>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
> >>> >> number of bytes/ios that must be depleted before throttling happens. A
> >>> >> tg that does not deplete this allowance functions as though it has no
> >>> >> configured limits. tgs earn additional allowance at rate defined by
> >>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
> >>> >> kicks in. If a tg is idle for a while, it will again have some burst
> >>> >> allowance before it gets throttled again.
> >>> >>
> >>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
> >>> >> when all "used" burst allowance would be earned back. trim_slice still
> >>> >> does progress slice_start as before and decrements *_disp as before, and
> >>> >> tgs continue to get bytes/ios in throtl_slice intervals.
> >>> >
> >>> > Can you describe why we need this? It would be great if you can describe the
> >>> > usage model and an example. Does this work for io.low/io.max or both?
> >>> >
> >>> > Thanks,
> >>> > Shaohua
> >>> >
> >>>
> >>> Use case that brought this up was configuring limits for a remote
> >>> shared device. Bursting beyond io.max is desired but only for so much
> >>> before the limit kicks in, afterwards with sustained usage throughput
> >>> is capped. (This proactively avoids remote-side limits). In that case
> >>> one would configure in a root container io.max + io.burst, and
> >>> configure low/other limits on descendants sharing the resource on the
> >>> same node.
> >>>
> >>> With this patch, so long as tg has not dispatched more than the burst,
> >>> no limit is applied at all by that tg, including limit imposed by
> >>> io.low in tg_iops_limit, etc.
> >>
> >> I'd appreciate if you can give more details about the 'why'. 'configuring
> >> limits for a remote shared device' doesn't justify the change.
> >
> > This is to configure a bursty workload (and associated device) with
> > known/allowed expected burst size, but to not allow full utilization
> > of the device for extended periods of time for QoS. During idle or low
> > use periods the burst allowance accrues, and then tasks can burst well
> > beyond the configured throttle up to the limit, afterwards is
> > throttled. A constant throttle speed isn't sufficient for this as you
> > can only burst 1 slice worth, but a limit of sorts is desirable for
> > preventing over utilization of the shared device. This type of limit
> > is also slightly different than what i understand io.low does in local
> > cases in that tg is only high priority/unthrottled if it is bursty,
> > and is limited with constant usage
> >
> > Khazhy
> 
> Hi Shaohua,
> 
> Does this clarify the reason for this patch? Is this (or something
> similar) a good fit for inclusion in blk-throttle?
> 

So does this burst have to be per cgroup? I mean, if throtl_slice were
configurable, that would allow controlling the size of the burst. (Just that
it would be for all cgroups.) If that works, that might be a simpler
solution.

Vivek
Khazhy Kumykov Dec. 18, 2017, 8:39 p.m. UTC | #10
On Mon, Dec 18, 2017 at 10:29 AM, Vivek Goyal <vgoyal@redhat.com> wrote:
> On Mon, Dec 18, 2017 at 10:16:02AM -0800, Khazhismel Kumykov wrote:
>> On Mon, Nov 20, 2017 at 8:36 PM, Khazhismel Kumykov <khazhy@google.com> wrote:
>> > On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
>> >> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
>> >>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
>> >>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
>> >>> >> Allows configuration additional bytes or ios before a throttle is
>> >>> >> triggered.
>> >>> >>
>> >>> >> This allows implementation of a bucket style rate-limit/throttle on a
>> >>> >> block device. Previously, bursting to a device was limited to allowance
>> >>> >> granted in a single throtl_slice (similar to a bucket with limit N and
>> >>> >> refill rate N/slice).
>> >>> >>
>> >>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
>> >>> >> number of bytes/ios that must be depleted before throttling happens. A
>> >>> >> tg that does not deplete this allowance functions as though it has no
>> >>> >> configured limits. tgs earn additional allowance at rate defined by
>> >>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
>> >>> >> kicks in. If a tg is idle for a while, it will again have some burst
>> >>> >> allowance before it gets throttled again.
>> >>> >>
>> >>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
>> >>> >> when all "used" burst allowance would be earned back. trim_slice still
>> >>> >> does progress slice_start as before and decrements *_disp as before, and
>> >>> >> tgs continue to get bytes/ios in throtl_slice intervals.
>> >>> >
>> >>> > Can you describe why we need this? It would be great if you can describe the
>> >>> > usage model and an example. Does this work for io.low/io.max or both?
>> >>> >
>> >>> > Thanks,
>> >>> > Shaohua
>> >>> >
>> >>>
>> >>> Use case that brought this up was configuring limits for a remote
>> >>> shared device. Bursting beyond io.max is desired but only for so much
>> >>> before the limit kicks in, afterwards with sustained usage throughput
>> >>> is capped. (This proactively avoids remote-side limits). In that case
>> >>> one would configure in a root container io.max + io.burst, and
>> >>> configure low/other limits on descendants sharing the resource on the
>> >>> same node.
>> >>>
>> >>> With this patch, so long as tg has not dispatched more than the burst,
>> >>> no limit is applied at all by that tg, including limit imposed by
>> >>> io.low in tg_iops_limit, etc.
>> >>
>> >> I'd appreciate if you can give more details about the 'why'. 'configuring
>> >> limits for a remote shared device' doesn't justify the change.
>> >
>> > This is to configure a bursty workload (and associated device) with
>> > known/allowed expected burst size, but to not allow full utilization
>> > of the device for extended periods of time for QoS. During idle or low
>> > use periods the burst allowance accrues, and then tasks can burst well
>> > beyond the configured throttle up to the limit, afterwards is
>> > throttled. A constant throttle speed isn't sufficient for this as you
>> > can only burst 1 slice worth, but a limit of sorts is desirable for
>> > preventing over utilization of the shared device. This type of limit
>> > is also slightly different than what i understand io.low does in local
>> > cases in that tg is only high priority/unthrottled if it is bursty,
>> > and is limited with constant usage
>> >
>> > Khazhy
>>
>> Hi Shaohua,
>>
>> Does this clarify the reason for this patch? Is this (or something
>> similar) a good fit for inclusion in blk-throttle?
>>
>
> So does this brust have to be per cgroup. I mean if thortl_slice was
> configurable, that will allow to control the size of burst. (Just that
> it will be for all cgroups). If that works, that might be a simpler
> solution.
>
> Vivek

The purpose for this configuration vs. increasing throtl_slice is the
behavior when the burst runs out. io/bytes allowance is given in
intervals of throtl_slice, so with a long throtl_slice, workloads
that exceed the limit will see extended periods with no IO, rather
than IO at the throttled speed. With this, once the burst runs out, since the
burst allowance is on top of the throttle, the device can continue to
be used more smoothly at the configured throttled speed. For this we
do want a throttle group with both the "steady state" rate + the burst
amount, and we get cgroup support with that.

I notice with cgroupv2 io, it seems no longer to configure a
device-wide throttle group e.g. on the root cgroup. (and putting
restrictions on root cgroup isn't an option) For something like this,
it does make sense to want to configure just for the device, vs. per
cgroup, perhaps there is somewhere better it would fit than as cgroup
option? perhaps have configuration on device node for a throttle group
for the device?

Khazhy
Vivek Goyal Dec. 18, 2017, 9:01 p.m. UTC | #11
On Mon, Dec 18, 2017 at 12:39:50PM -0800, Khazhismel Kumykov wrote:
> On Mon, Dec 18, 2017 at 10:29 AM, Vivek Goyal <vgoyal@redhat.com> wrote:
> > On Mon, Dec 18, 2017 at 10:16:02AM -0800, Khazhismel Kumykov wrote:
> >> On Mon, Nov 20, 2017 at 8:36 PM, Khazhismel Kumykov <khazhy@google.com> wrote:
> >> > On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
> >> >> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
> >> >>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
> >> >>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
> >> >>> >> Allows configuration additional bytes or ios before a throttle is
> >> >>> >> triggered.
> >> >>> >>
> >> >>> >> This allows implementation of a bucket style rate-limit/throttle on a
> >> >>> >> block device. Previously, bursting to a device was limited to allowance
> >> >>> >> granted in a single throtl_slice (similar to a bucket with limit N and
> >> >>> >> refill rate N/slice).
> >> >>> >>
> >> >>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
> >> >>> >> number of bytes/ios that must be depleted before throttling happens. A
> >> >>> >> tg that does not deplete this allowance functions as though it has no
> >> >>> >> configured limits. tgs earn additional allowance at rate defined by
> >> >>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
> >> >>> >> kicks in. If a tg is idle for a while, it will again have some burst
> >> >>> >> allowance before it gets throttled again.
> >> >>> >>
> >> >>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
> >> >>> >> when all "used" burst allowance would be earned back. trim_slice still
> >> >>> >> does progress slice_start as before and decrements *_disp as before, and
> >> >>> >> tgs continue to get bytes/ios in throtl_slice intervals.
> >> >>> >
> >> >>> > Can you describe why we need this? It would be great if you can describe the
> >> >>> > usage model and an example. Does this work for io.low/io.max or both?
> >> >>> >
> >> >>> > Thanks,
> >> >>> > Shaohua
> >> >>> >
> >> >>>
> >> >>> Use case that brought this up was configuring limits for a remote
> >> >>> shared device. Bursting beyond io.max is desired but only for so much
> >> >>> before the limit kicks in, afterwards with sustained usage throughput
> >> >>> is capped. (This proactively avoids remote-side limits). In that case
> >> >>> one would configure in a root container io.max + io.burst, and
> >> >>> configure low/other limits on descendants sharing the resource on the
> >> >>> same node.
> >> >>>
> >> >>> With this patch, so long as tg has not dispatched more than the burst,
> >> >>> no limit is applied at all by that tg, including limit imposed by
> >> >>> io.low in tg_iops_limit, etc.
> >> >>
> >> >> I'd appreciate if you can give more details about the 'why'. 'configuring
> >> >> limits for a remote shared device' doesn't justify the change.
> >> >
> >> > This is to configure a bursty workload (and associated device) with
> >> > known/allowed expected burst size, but to not allow full utilization
> >> > of the device for extended periods of time for QoS. During idle or low
> >> > use periods the burst allowance accrues, and then tasks can burst well
> >> > beyond the configured throttle up to the limit, afterwards is
> >> > throttled. A constant throttle speed isn't sufficient for this as you
> >> > can only burst 1 slice worth, but a limit of sorts is desirable for
> >> > preventing over utilization of the shared device. This type of limit
> >> > is also slightly different than what i understand io.low does in local
> >> > cases in that tg is only high priority/unthrottled if it is bursty,
> >> > and is limited with constant usage
> >> >
> >> > Khazhy
> >>
> >> Hi Shaohua,
> >>
> >> Does this clarify the reason for this patch? Is this (or something
> >> similar) a good fit for inclusion in blk-throttle?
> >>
> >
> > So does this brust have to be per cgroup. I mean if thortl_slice was
> > configurable, that will allow to control the size of burst. (Just that
> > it will be for all cgroups). If that works, that might be a simpler
> > solution.
> >
> > Vivek
> 
> The purpose for this configuration vs. increasing throtl_slice is the
> behavior when the burst runs out. io/bytes allowance is given in
> intervals of throtl_slice, so for long throtl_slice for those devices
> that exceed the limit will see extended periods with no IO, rather
> than at throttled speed. With this once burst is run out, since the
> burst allowance is on top of the throttle, the device can continue to
> be used more smoothly at the configured throttled speed.

I thought that the whole idea of burst is that there is some bursty IO which
will quickly finish. If the workload expects a steady-state IO rate, then
why allow a large burst to begin with?

So yes, increasing throtl_slice should allow you to dispatch a slice's
worth of IO and then throttle the process. If the burst has finished, and
the process does another burst of IO, it may be dispatched immediately
too (depending on where we are in the slice at the time).

> For this we
> do want a throttle group with both the "steady state" rate + the burst
> amount, and we get cgroup support with that.

So if burst IO is on top of steady configured rate, how frequently you
allow burst IO?

> 
> I notice with cgroupv2 io, it seems no longer to configure a
> device-wide throttle group e.g. on the root cgroup. (and putting
> restrictions on root cgroup isn't an option) For something like this,
> it does make sense to want to configure just for the device, vs. per
> cgroup, perhaps there is somewhere better it would fit than as cgroup
> option? perhaps have configuration on device node for a throttle group
> for the device?

A default throttle slice value per device might be reasonable. Though not
perfect for the cases where people want the same throtl_slice value for
the whole system.

Vivek
Khazhy Kumykov Dec. 18, 2017, 10:22 p.m. UTC | #12
On Mon, Dec 18, 2017 at 1:01 PM, Vivek Goyal <vgoyal@redhat.com> wrote:
> On Mon, Dec 18, 2017 at 12:39:50PM -0800, Khazhismel Kumykov wrote:
>> On Mon, Dec 18, 2017 at 10:29 AM, Vivek Goyal <vgoyal@redhat.com> wrote:
>> > On Mon, Dec 18, 2017 at 10:16:02AM -0800, Khazhismel Kumykov wrote:
>> >> On Mon, Nov 20, 2017 at 8:36 PM, Khazhismel Kumykov <khazhy@google.com> wrote:
>> >> > On Fri, Nov 17, 2017 at 11:26 AM, Shaohua Li <shli@kernel.org> wrote:
>> >> >> On Thu, Nov 16, 2017 at 08:25:58PM -0800, Khazhismel Kumykov wrote:
>> >> >>> On Thu, Nov 16, 2017 at 8:50 AM, Shaohua Li <shli@kernel.org> wrote:
>> >> >>> > On Tue, Nov 14, 2017 at 03:10:22PM -0800, Khazhismel Kumykov wrote:
>> >> >>> >> Allows configuration additional bytes or ios before a throttle is
>> >> >>> >> triggered.
>> >> >>> >>
>> >> >>> >> This allows implementation of a bucket style rate-limit/throttle on a
>> >> >>> >> block device. Previously, bursting to a device was limited to allowance
>> >> >>> >> granted in a single throtl_slice (similar to a bucket with limit N and
>> >> >>> >> refill rate N/slice).
>> >> >>> >>
>> >> >>> >> Additional parameters bytes/io_burst_conf defined for tg, which define a
>> >> >>> >> number of bytes/ios that must be depleted before throttling happens. A
>> >> >>> >> tg that does not deplete this allowance functions as though it has no
>> >> >>> >> configured limits. tgs earn additional allowance at rate defined by
>> >> >>> >> bps/iops for the tg. Once a tg has *_disp > *_burst_conf, throttling
>> >> >>> >> kicks in. If a tg is idle for a while, it will again have some burst
>> >> >>> >> allowance before it gets throttled again.
>> >> >>> >>
>> >> >>> >> slice_end for a tg is extended until io_disp/byte_disp would fall to 0,
>> >> >>> >> when all "used" burst allowance would be earned back. trim_slice still
>> >> >>> >> does progress slice_start as before and decrements *_disp as before, and
>> >> >>> >> tgs continue to get bytes/ios in throtl_slice intervals.
>> >> >>> >
>> >> >>> > Can you describe why we need this? It would be great if you can describe the
>> >> >>> > usage model and an example. Does this work for io.low/io.max or both?
>> >> >>> >
>> >> >>> > Thanks,
>> >> >>> > Shaohua
>> >> >>> >
>> >> >>>
>> >> >>> Use case that brought this up was configuring limits for a remote
>> >> >>> shared device. Bursting beyond io.max is desired but only for so much
>> >> >>> before the limit kicks in, afterwards with sustained usage throughput
>> >> >>> is capped. (This proactively avoids remote-side limits). In that case
>> >> >>> one would configure in a root container io.max + io.burst, and
>> >> >>> configure low/other limits on descendants sharing the resource on the
>> >> >>> same node.
>> >> >>>
>> >> >>> With this patch, so long as tg has not dispatched more than the burst,
>> >> >>> no limit is applied at all by that tg, including limit imposed by
>> >> >>> io.low in tg_iops_limit, etc.
>> >> >>
>> >> >> I'd appreciate if you can give more details about the 'why'. 'configuring
>> >> >> limits for a remote shared device' doesn't justify the change.
>> >> >
>> >> > This is to configure a bursty workload (and associated device) with
>> >> > known/allowed expected burst size, but to not allow full utilization
>> >> > of the device for extended periods of time for QoS. During idle or low
>> >> > use periods the burst allowance accrues, and then tasks can burst well
>> >> > beyond the configured throttle up to the limit, afterwards is
>> >> > throttled. A constant throttle speed isn't sufficient for this as you
>> >> > can only burst 1 slice worth, but a limit of sorts is desirable for
>> >> > preventing over utilization of the shared device. This type of limit
>> >> > is also slightly different than what i understand io.low does in local
>> >> > cases in that tg is only high priority/unthrottled if it is bursty,
>> >> > and is limited with constant usage
>> >> >
>> >> > Khazhy
>> >>
>> >> Hi Shaohua,
>> >>
>> >> Does this clarify the reason for this patch? Is this (or something
>> >> similar) a good fit for inclusion in blk-throttle?
>> >>
>> >
>> > So does this brust have to be per cgroup. I mean if thortl_slice was
>> > configurable, that will allow to control the size of burst. (Just that
>> > it will be for all cgroups). If that works, that might be a simpler
>> > solution.
>> >
>> > Vivek
>>
>> The purpose for this configuration vs. increasing throtl_slice is the
>> behavior when the burst runs out. io/bytes allowance is given in
>> intervals of throtl_slice, so for long throtl_slice for those devices
>> that exceed the limit will see extended periods with no IO, rather
>> than at throttled speed. With this once burst is run out, since the
>> burst allowance is on top of the throttle, the device can continue to
>> be used more smoothly at the configured throttled speed.
>
> I thought that whole idea of burst is that there is some bursty IO which
> will quickly finish. If workload expects a stedy state IO rate, then
> why to allow a large burst to begin with.
>
> So yes, increasing throtl slice will should allow you to dispatch a slice
> worth of IO and then throttle the process. If burst has finished, and
> process does another burst of IO, it will may be dispatch it immediately
> too (Depending on where are we in slice at the time).
>
>> For this we
>> do want a throttle group with both the "steady state" rate + the burst
>> amount, and we get cgroup support with that.
>
> So if burst IO is on top of steady configured rate, how frequently you
> allow burst IO?
>

This is mostly for the unexpected behavior: workload is expected to
have large burst(s) not exceeding the burst limit, and idle most of
the time, but isn't directly controlled by us; in the case that it
does exceed the limit or is sustained, we want to allow IO at the throttled
speed rather than cutting it off. e.g. for a device which allows a large burst that
refills over 60s, it is preferable to have a user workload which is
non-conformant/broken/whatnot to have consistent slower IOs rather
than being blocked on IO for potentially 60s at a time. The steady
state io rate is so we don't have a situation where the IO rate is 0
for a long time

>>
>> I notice with cgroupv2 io, it seems no longer to configure a
>> device-wide throttle group e.g. on the root cgroup. (and putting
>> restrictions on root cgroup isn't an option) For something like this,
>> it does make sense to want to configure just for the device, vs. per
>> cgroup, perhaps there is somewhere better it would fit than as cgroup
>> option? perhaps have configuration on device node for a throttle group
>> for the device?
>
> Default throtle slice value per device might be reasonable. Though not
> perfect for the cases where people want same throtl slice value for
> whole of the system.
>
throtl_slice is already per device as far as I can see, with rotational and
non-rotational devices having different values; it just isn't configurable.

> Vivek
diff mbox

Patch

diff --git a/block/Kconfig b/block/Kconfig
index 28ec55752b68..fbd05b419f93 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -128,6 +128,17 @@  config BLK_DEV_THROTTLING_LOW
 
 	Note, this is an experimental interface and could be changed someday.
 
+config BLK_DEV_THROTTLING_BURST
+    bool "Block throttling .burst allowance interface"
+    depends on BLK_DEV_THROTTLING
+    default n
+    ---help---
+    Add .burst allowance for block throttling. Burst allowance allows for
+    additional unthrottled usage, while still limiting speed for sustained
+    usage.
+
+    If in doubt, say N.
+
 config BLK_CMDLINE_PARSER
 	bool "Block device command line partition parser"
 	default n
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 96ad32623427..27c084312772 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -157,6 +157,11 @@  struct throtl_grp {
 	/* Number of bio's dispatched in current slice */
 	unsigned int io_disp[2];
 
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+	uint64_t bytes_burst_conf[2];
+	unsigned int io_burst_conf[2];
+#endif
+
 	unsigned long last_low_overflow_time[2];
 
 	uint64_t last_bytes_disp[2];
@@ -507,6 +512,12 @@  static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
 	tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
 	tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
 	tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+	tg->bytes_burst_conf[READ] = 0;
+	tg->bytes_burst_conf[WRITE] = 0;
+	tg->io_burst_conf[READ] = 0;
+	tg->io_burst_conf[WRITE] = 0;
+#endif
 	/* LIMIT_LOW will have default value 0 */
 
 	tg->latency_target = DFL_LATENCY_TARGET;
@@ -800,6 +811,26 @@  static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
 		   tg->slice_end[rw], jiffies);
 }
 
+/*
+ * When current slice should end.
+ *
+ * With CONFIG_BLK_DEV_THROTTLING_BURST, we will wait longer than min_wait
+ * for slice to recover used burst allowance. (*_disp -> 0). Setting slice_end
+ * before this would result in tg receiving additional burst allowance.
+ */
+static inline unsigned long throtl_slice_wait(struct throtl_grp *tg, bool rw,
+						unsigned long min_wait)
+{
+	unsigned long bytes_wait = 0, io_wait = 0;
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+	if (tg->bytes_burst_conf[rw])
+		bytes_wait = (tg->bytes_disp[rw] * HZ) / tg_bps_limit(tg, rw);
+	if (tg->io_burst_conf[rw])
+		io_wait = (tg->io_disp[rw] * HZ) / tg_iops_limit(tg, rw);
+#endif
+	return max(min_wait, max(bytes_wait, io_wait));
+}
+
 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
 					unsigned long jiffy_end)
 {
@@ -849,7 +880,8 @@  static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
 	 * is bad because it does not allow new slice to start.
 	 */
 
-	throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
+	throtl_set_slice_end(tg, rw,
+		jiffies + throtl_slice_wait(tg, rw, tg->td->throtl_slice));
 
 	time_elapsed = jiffies - tg->slice_start[rw];
 
@@ -889,7 +921,7 @@  static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 				  unsigned long *wait)
 {
 	bool rw = bio_data_dir(bio);
-	unsigned int io_allowed;
+	unsigned int io_allowed, io_disp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 	u64 tmp;
 
@@ -908,6 +940,17 @@  static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 	 * have been trimmed.
 	 */
 
+	io_disp = tg->io_disp[rw];
+
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+	if (tg->io_disp[rw] < tg->io_burst_conf[rw]) {
+		if (wait)
+			*wait = 0;
+		return true;
+	}
+	io_disp -= tg->io_burst_conf[rw];
+#endif
+
 	tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
 	do_div(tmp, HZ);
 
@@ -916,14 +959,14 @@  static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
 	else
 		io_allowed = tmp;
 
-	if (tg->io_disp[rw] + 1 <= io_allowed) {
+	if (io_disp + 1 <= io_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
+	jiffy_wait = ((io_disp + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
 
 	if (jiffy_wait > jiffy_elapsed)
 		jiffy_wait = jiffy_wait - jiffy_elapsed;
@@ -939,7 +982,7 @@  static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 				 unsigned long *wait)
 {
 	bool rw = bio_data_dir(bio);
-	u64 bytes_allowed, extra_bytes, tmp;
+	u64 bytes_allowed, extra_bytes, bytes_disp, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
 	unsigned int bio_size = throtl_bio_data_size(bio);
 
@@ -951,18 +994,28 @@  static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 
 	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
 
+	bytes_disp = tg->bytes_disp[rw];
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+	if (tg->bytes_disp[rw] < tg->bytes_burst_conf[rw]) {
+		if (wait)
+			*wait = 0;
+		return true;
+	}
+	bytes_disp -= tg->bytes_burst_conf[rw];
+#endif
+
 	tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
+	if (bytes_disp + bio_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
+	extra_bytes = bytes_disp + bio_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
 	if (!jiffy_wait)
@@ -986,7 +1039,7 @@  static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 			    unsigned long *wait)
 {
 	bool rw = bio_data_dir(bio);
-	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
+	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0, disp_time;
 
 	/*
  	 * Currently whole state machine of group depends on first bio
@@ -1015,10 +1068,10 @@  static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
 		throtl_start_new_slice(tg, rw);
 	else {
-		if (time_before(tg->slice_end[rw],
-		    jiffies + tg->td->throtl_slice))
-			throtl_extend_slice(tg, rw,
-				jiffies + tg->td->throtl_slice);
+		disp_time = jiffies + throtl_slice_wait(
+				tg, rw, tg->td->throtl_slice);
+		if (time_before(tg->slice_end[rw], disp_time))
+			throtl_extend_slice(tg, rw, disp_time);
 	}
 
 	if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
@@ -1033,8 +1086,9 @@  static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 	if (wait)
 		*wait = max_wait;
 
-	if (time_before(tg->slice_end[rw], jiffies + max_wait))
-		throtl_extend_slice(tg, rw, jiffies + max_wait);
+	disp_time = jiffies + throtl_slice_wait(tg, rw, max_wait);
+	if (time_before(tg->slice_end[rw], disp_time))
+		throtl_extend_slice(tg, rw, disp_time);
 
 	return 0;
 }
@@ -1705,6 +1759,108 @@  static ssize_t tg_set_limit(struct kernfs_open_file *of,
 	return ret ?: nbytes;
 }
 
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+static u64 tg_prfill_burst(struct seq_file *sf, struct blkg_policy_data *pd,
+			   int data)
+{
+	struct throtl_grp *tg = pd_to_tg(pd);
+	const char *dname = blkg_dev_name(pd->blkg);
+	char bufs[4][21];
+
+	if (!dname)
+		return 0;
+
+	if (tg->bytes_burst_conf[READ] == 0 &&
+	    tg->bytes_burst_conf[WRITE] == 0 &&
+	    tg->io_burst_conf[READ] == 0 &&
+	    tg->io_burst_conf[WRITE] == 0)
+		return 0;
+
+	snprintf(bufs[0], sizeof(bufs[0]), "%llu",
+		tg->bytes_burst_conf[READ]);
+	snprintf(bufs[1], sizeof(bufs[1]), "%llu",
+		tg->bytes_burst_conf[WRITE]);
+	snprintf(bufs[2], sizeof(bufs[2]), "%u",
+		tg->io_burst_conf[READ]);
+	snprintf(bufs[3], sizeof(bufs[3]), "%u",
+		tg->io_burst_conf[WRITE]);
+
+	seq_printf(sf, "%s brbyte=%s bwbyte=%s brio=%s bwio=%s\n",
+		   dname, bufs[0], bufs[1], bufs[2], bufs[3]);
+	return 0;
+}
+
+static int tg_print_burst(struct seq_file *sf, void *v)
+{
+	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_burst,
+			  &blkcg_policy_throtl, 0, false);
+	return 0;
+}
+
+static ssize_t tg_set_burst(struct kernfs_open_file *of,
+			  char *buf, size_t nbytes, loff_t off)
+{
+	struct blkcg *blkcg = css_to_blkcg(of_css(of));
+	struct blkg_conf_ctx ctx;
+	struct throtl_grp *tg;
+	u64 v[4];
+	int ret;
+
+	ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
+	if (ret)
+		return ret;
+
+	tg = blkg_to_tg(ctx.blkg);
+
+	v[0] = tg->bytes_burst_conf[READ];
+	v[1] = tg->bytes_burst_conf[WRITE];
+	v[2] = tg->io_burst_conf[READ];
+	v[3] = tg->io_burst_conf[WRITE];
+
+	while (true) {
+		char tok[28];	/* bwbyte=18446744073709551615 */
+		char *p;
+		u64 val = U64_MAX;
+		int len;
+
+		if (sscanf(ctx.body, "%27s%n", tok, &len) != 1)
+			break;
+		if (tok[0] == '\0')
+			break;
+		ctx.body += len;
+
+		ret = -EINVAL;
+		p = tok;
+		strsep(&p, "=");
+		if (!p || (kstrtoull(p, 0, &val) != 0 && strcmp(p, "max")))
+			goto out_finish;
+
+		ret = -EINVAL;
+		if (!strcmp(tok, "brbyte"))
+			v[0] = val;
+		else if (!strcmp(tok, "bwbyte"))
+			v[1] = val;
+		else if (!strcmp(tok, "brio"))
+			v[2] = min_t(u64, val, UINT_MAX);
+		else if (!strcmp(tok, "bwio"))
+			v[3] = min_t(u64, val, UINT_MAX);
+		else
+			goto out_finish;
+	}
+
+	tg->bytes_burst_conf[READ] = v[0];
+	tg->bytes_burst_conf[WRITE] = v[1];
+	tg->io_burst_conf[READ] = v[2];
+	tg->io_burst_conf[WRITE] = v[3];
+
+	tg_conf_updated(tg, false);
+	ret = 0;
+out_finish:
+	blkg_conf_finish(&ctx);
+	return ret ?: nbytes;
+}
+#endif
+
 static struct cftype throtl_files[] = {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 	{
@@ -1714,6 +1870,14 @@  static struct cftype throtl_files[] = {
 		.write = tg_set_limit,
 		.private = LIMIT_LOW,
 	},
+#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING_BURST
+	{
+		.name = "burst",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.seq_show = tg_print_burst,
+		.write = tg_set_burst,
+	},
 #endif
 	{
 		.name = "max",