@@ -242,10 +242,11 @@ static void cryptodev_backend_throttle_timer_cb(void *opaque)
continue;
}
- throttle_account(&backend->ts, true, ret);
+ throttle_account(&backend->ts, THROTTLE_WRITE, ret);
cryptodev_backend_operation(backend, op_info);
if (throttle_enabled(&backend->tc) &&
- throttle_schedule_timer(&backend->ts, &backend->tt, true)) {
+ throttle_schedule_timer(&backend->ts, &backend->tt,
+ THROTTLE_WRITE)) {
break;
}
}
@@ -261,7 +262,7 @@ int cryptodev_backend_crypto_operation(
goto do_account;
}
- if (throttle_schedule_timer(&backend->ts, &backend->tt, true) ||
+ if (throttle_schedule_timer(&backend->ts, &backend->tt, THROTTLE_WRITE) ||
!QTAILQ_EMPTY(&backend->opinfos)) {
QTAILQ_INSERT_TAIL(&backend->opinfos, op_info, next);
return 0;
@@ -273,7 +274,7 @@ do_account:
return ret;
}
- throttle_account(&backend->ts, true, ret);
+ throttle_account(&backend->ts, THROTTLE_WRITE, ret);
return cryptodev_backend_operation(backend, op_info);
}
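
The cryptodev hunks above simply replace the boolean literal with THROTTLE_WRITE.
That reads as a drop-in change because of how ThrottleDirection is laid out; a
minimal sketch of the enum as it is assumed to be declared in
include/qemu/throttle.h (the declaration itself is not part of this excerpt):

    typedef enum {
        THROTTLE_READ = 0,   /* replaces is_write == false */
        THROTTLE_WRITE,      /* replaces is_write == true  */
        THROTTLE_MAX,        /* array bound, used by the new asserts below */
    } ThrottleDirection;

Because THROTTLE_READ is 0 and THROTTLE_WRITE is 1, the enum values select the
same array slots the old bool did, so the conversion does not change behaviour.
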
@@ -270,6 +270,7 @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
ThrottleState *ts = tgm->throttle_state;
ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
ThrottleTimers *tt = &tgm->throttle_timers;
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
bool must_wait;
if (qatomic_read(&tgm->io_limits_disabled)) {
@@ -281,7 +282,7 @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm,
return true;
}
- must_wait = throttle_schedule_timer(ts, tt, is_write);
+ must_wait = throttle_schedule_timer(ts, tt, direction);
/* If a timer just got armed, set tgm as the current token */
if (must_wait) {
@@ -364,6 +365,7 @@ void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm
bool must_wait;
ThrottleGroupMember *token;
ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
assert(bytes >= 0);
@@ -386,7 +388,7 @@ void coroutine_fn throttle_group_co_io_limits_intercept(ThrottleGroupMember *tgm
}
/* The I/O will be executed, so do the accounting */
- throttle_account(tgm->throttle_state, is_write, bytes);
+ throttle_account(tgm->throttle_state, direction, bytes);
/* Schedule the next request */
schedule_next_request(tgm, is_write);
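
throttle_group_schedule_timer() and throttle_group_co_io_limits_intercept()
still receive a bool is_write from the block layer, so each converts it locally
before calling into the throttle core. A sketch of the mapping, written as a
hypothetical helper purely for illustration:

    /* Hypothetical helper; the patch open-codes this ternary at each call site. */
    static inline ThrottleDirection throttle_direction(bool is_write)
    {
        return is_write ? THROTTLE_WRITE : THROTTLE_READ;
    }

The mapping is total and order-preserving, so indexing with direction yields
exactly the element the old is_write index picked; note that
schedule_next_request() above still takes the raw bool.
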
@@ -97,16 +97,18 @@ void fsdev_throttle_init(FsThrottle *fst)
void coroutine_fn fsdev_co_throttle_request(FsThrottle *fst, bool is_write,
struct iovec *iov, int iovcnt)
{
+ ThrottleDirection direction = is_write ? THROTTLE_WRITE : THROTTLE_READ;
+
if (throttle_enabled(&fst->cfg)) {
- if (throttle_schedule_timer(&fst->ts, &fst->tt, is_write) ||
+ if (throttle_schedule_timer(&fst->ts, &fst->tt, direction) ||
!qemu_co_queue_empty(&fst->throttled_reqs[is_write])) {
qemu_co_queue_wait(&fst->throttled_reqs[is_write], NULL);
}
- throttle_account(&fst->ts, is_write, iov_size(iov, iovcnt));
+ throttle_account(&fst->ts, direction, iov_size(iov, iovcnt));
if (!qemu_co_queue_empty(&fst->throttled_reqs[is_write]) &&
- !throttle_schedule_timer(&fst->ts, &fst->tt, is_write)) {
+ !throttle_schedule_timer(&fst->ts, &fst->tt, direction)) {
qemu_co_queue_next(&fst->throttled_reqs[is_write]);
}
}
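
In the fsdev hunk the throttle core calls now take direction while the
coroutine queues are still indexed with the raw bool. Since the enum values
coincide with the old 0/1 indices, both spellings address the same slot; a
small illustration (not part of the patch):

    /*
     * Illustration only: with THROTTLE_READ == 0 and THROTTLE_WRITE == 1,
     * these two lookups select the same queue, so switching the queue index
     * over to the enum would be a mechanical follow-up.
     */
    CoQueue *by_bool = &fst->throttled_reqs[is_write];
    CoQueue *by_enum = &fst->throttled_reqs[direction];
    assert(by_bool == by_enum);
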
@@ -154,9 +154,10 @@ void throttle_config_init(ThrottleConfig *cfg);
/* usage */
bool throttle_schedule_timer(ThrottleState *ts,
ThrottleTimers *tt,
- bool is_write);
+ ThrottleDirection direction);
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size);
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
+ uint64_t size);
void throttle_limits_to_config(ThrottleLimits *arg, ThrottleConfig *cfg,
Error **errp);
void throttle_config_to_limits(ThrottleConfig *cfg, ThrottleLimits *var);
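
With the public prototypes updated, a caller names the direction explicitly
instead of passing a bool. A minimal usage sketch against the new signatures,
mirroring the pattern used by the cryptodev hunks (ts, tt and nbytes are
illustrative locals):

    /* Queue the request if a throttle timer had to be armed, otherwise
     * account the I/O and submit it. */
    if (throttle_schedule_timer(ts, tt, THROTTLE_WRITE)) {
        /* a timer fires later; the request must wait on a queue */
    } else {
        throttle_account(ts, THROTTLE_WRITE, nbytes);
        /* submit the request */
    }
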
@@ -637,9 +637,9 @@ static bool do_test_accounting(bool is_ops, /* are we testing bps or ops */
throttle_config(&ts, QEMU_CLOCK_VIRTUAL, &cfg);
/* account a read */
- throttle_account(&ts, false, size);
+ throttle_account(&ts, THROTTLE_READ, size);
/* account a write */
- throttle_account(&ts, true, size);
+ throttle_account(&ts, THROTTLE_WRITE, size);
/* check total result */
index = to_test[is_ops][0];
@@ -136,11 +136,11 @@ int64_t throttle_compute_wait(LeakyBucket *bkt)
/* This function compute the time that must be waited while this IO
*
- * @is_write: true if the current IO is a write, false if it's a read
+ * @direction: throttle direction
* @ret: time to wait
*/
static int64_t throttle_compute_wait_for(ThrottleState *ts,
- bool is_write)
+ ThrottleDirection direction)
{
BucketType to_check[2][4] = { {THROTTLE_BPS_TOTAL,
THROTTLE_OPS_TOTAL,
@@ -154,7 +154,7 @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
int i;
for (i = 0; i < 4; i++) {
- BucketType index = to_check[is_write][i];
+ BucketType index = to_check[direction][i];
wait = throttle_compute_wait(&ts->cfg.buckets[index]);
if (wait > max_wait) {
max_wait = wait;
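
The to_check table is indexed by direction in its first dimension, so row 0
serves reads and row 1 serves writes. The full initializer falls partly outside
this hunk's context; as assumed from the surrounding source it looks like this,
which is what makes the enum a drop-in replacement for the bool index:

    /* Row THROTTLE_READ checks the total and read buckets,
     * row THROTTLE_WRITE the total and write buckets. */
    BucketType to_check[2][4] = {
        { THROTTLE_BPS_TOTAL, THROTTLE_OPS_TOTAL,
          THROTTLE_BPS_READ,  THROTTLE_OPS_READ },
        { THROTTLE_BPS_TOTAL, THROTTLE_OPS_TOTAL,
          THROTTLE_BPS_WRITE, THROTTLE_OPS_WRITE },
    };
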
@@ -166,13 +166,13 @@ static int64_t throttle_compute_wait_for(ThrottleState *ts,
/* compute the timer for this type of operation
*
- * @is_write: the type of operation
+ * @direction: throttle direction
* @now: the current clock timestamp
* @next_timestamp: the resulting timer
* @ret: true if a timer must be set
*/
static bool throttle_compute_timer(ThrottleState *ts,
- bool is_write,
+ ThrottleDirection direction,
int64_t now,
int64_t *next_timestamp)
{
@@ -182,7 +182,7 @@ static bool throttle_compute_timer(ThrottleState *ts,
throttle_do_leak(ts, now);
/* compute the wait time if any */
- wait = throttle_compute_wait_for(ts, is_write);
+ wait = throttle_compute_wait_for(ts, direction);
/* if the code must wait compute when the next timer should fire */
if (wait) {
@@ -425,23 +425,24 @@ void throttle_get_config(ThrottleState *ts, ThrottleConfig *cfg)
* NOTE: this function is not unit tested due to it's usage of timer_mod
*
* @tt: the timers structure
- * @is_write: the type of operation (read/write)
+ * @direction: throttle direction
* @ret: true if the timer has been scheduled else false
*/
bool throttle_schedule_timer(ThrottleState *ts,
ThrottleTimers *tt,
- bool is_write)
+ ThrottleDirection direction)
{
int64_t now = qemu_clock_get_ns(tt->clock_type);
int64_t next_timestamp;
QEMUTimer *timer;
bool must_wait;
- timer = is_write ? tt->timers[THROTTLE_WRITE] : tt->timers[THROTTLE_READ];
+ assert(direction < THROTTLE_MAX);
+ timer = tt->timers[direction];
assert(timer);
must_wait = throttle_compute_timer(ts,
- is_write,
+ direction,
now,
&next_timestamp);
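
Replacing the ternary with a direct tt->timers[direction] lookup relies on the
timers array being sized and ordered by the enum; the new assert makes that
precondition explicit. An abridged sketch of the assumed ThrottleTimers layout
(other members omitted):

    typedef struct ThrottleTimers {
        /* one timer per direction: [THROTTLE_READ], [THROTTLE_WRITE] */
        QEMUTimer *timers[THROTTLE_MAX];
        QEMUClockType clock_type;
        /* per-direction callbacks and opaque pointer omitted here */
    } ThrottleTimers;
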
@@ -462,10 +463,11 @@ bool throttle_schedule_timer(ThrottleState *ts,
/* do the accounting for this operation
*
- * @is_write: the type of operation (read/write)
+ * @direction: throttle direction
* @size: the size of the operation
*/
-void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
+void throttle_account(ThrottleState *ts, ThrottleDirection direction,
+ uint64_t size)
{
const BucketType bucket_types_size[2][2] = {
{ THROTTLE_BPS_TOTAL, THROTTLE_BPS_READ },
@@ -478,6 +480,7 @@ void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
double units = 1.0;
unsigned i;
+ assert(direction < THROTTLE_MAX);
/* if cfg.op_size is defined and smaller than size we compute unit count */
if (ts->cfg.op_size && size > ts->cfg.op_size) {
units = (double) size / ts->cfg.op_size;
@@ -486,13 +489,13 @@ void throttle_account(ThrottleState *ts, bool is_write, uint64_t size)
for (i = 0; i < 2; i++) {
LeakyBucket *bkt;
- bkt = &ts->cfg.buckets[bucket_types_size[is_write][i]];
+ bkt = &ts->cfg.buckets[bucket_types_size[direction][i]];
bkt->level += size;
if (bkt->burst_length > 1) {
bkt->burst_level += size;
}
- bkt = &ts->cfg.buckets[bucket_types_units[is_write][i]];
+ bkt = &ts->cfg.buckets[bucket_types_units[direction][i]];
bkt->level += units;
if (bkt->burst_length > 1) {
bkt->burst_level += units;
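
To make the per-direction accounting concrete, consider one 4 KiB write with no
op_size configured (so units stays 1.0). A hedged walk-through of what the loop
above charges, with bucket_types_units assumed to mirror bucket_types_size
using the OPS buckets:

    /* Account one 4 KiB write; cfg.op_size == 0, so units == 1.0. */
    throttle_account(ts, THROTTLE_WRITE, 4096);
    /* Effect of the loop:
     *   i == 0: buckets[THROTTLE_BPS_TOTAL].level += 4096
     *           buckets[THROTTLE_OPS_TOTAL].level += 1.0
     *   i == 1: buckets[THROTTLE_BPS_WRITE].level += 4096
     *           buckets[THROTTLE_OPS_WRITE].level += 1.0
     * With THROTTLE_READ the *_READ buckets are charged instead, matching the
     * old is_write == false path.
     */
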