@@ -1978,8 +1978,9 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
if (bfqd->in_service_queue &&
((bfqq_wants_to_preempt &&
bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) ||
- bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
- !bfq_better_to_idle(bfqd->in_service_queue)) &&
+ bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
+ (bfq_bfqq_wait_request(bfqq) &&
+ !bfq_better_to_idle(bfqd->in_service_queue))) &&
next_queue_may_preempt(bfqd))
bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
false, BFQQE_PREEMPTED);
The reason we preempt in_service_queue when bfq_better_to_idle() says no is (from the comment above the preemption expiration): As for throughput, we ask bfq_better_to_idle() whether we still need to plug I/O dispatching. If bfq_better_to_idle() says no, then plugging is not needed any longer, either to boost throughput or to preserve service guarantees. Then the best option is to stop plugging I/O, as not doing so would certainly lower throughput. This preemption only makes sense if the I/O of in_service_queue is currently plugged, in which case there is a need to stop plugging. Make sure bfqq is plugged before the bfq_better_to_idle() check to improve this. Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> --- block/bfq-iosched.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)