@@ -597,6 +597,9 @@ void __bio_clone_fast(struct bio *bio, s
* so we don't set nor calculate new physical/hw segment counts here
*/
bio->bi_disk = bio_src->bi_disk;
+#ifdef CONFIG_BLK_DEV_THROTTLING
+ bio->bi_throttled_disk = bio_src->bi_throttled_disk;
+#endif
bio_set_flag(bio, BIO_CLONED);
bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint;
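
With this change a fast clone inherits both the disk pointer and the throttle marker from its source, so a stacking driver that clones an already-charged bio and resubmits it to the same disk will not charge it twice. A compilable sketch of the invariant, using hypothetical stand-in types rather than the real kernel structs:

    struct gendisk;
    struct bio { struct gendisk *bi_disk, *bi_throttled_disk; };

    /* Models the copy added to __bio_clone_fast() above: the clone starts
     * out on the same disk and carries the same throttle marker. */
    static inline void clone_throttle_state(struct bio *clone,
                                            const struct bio *src)
    {
            clone->bi_disk = src->bi_disk;
            clone->bi_throttled_disk = src->bi_throttled_disk;
    }
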
@@ -1051,13 +1051,12 @@ static void throtl_charge_bio(struct thr
tg->last_io_disp[rw]++;
/*
- * BIO_THROTTLED is used to prevent the same bio to be throttled
+ * bi_throttled_disk is used to prevent the same bio from being throttled
* more than once as a throttled bio will go through blk-throtl the
* second time when it eventually gets issued. Set it when a bio
* is being charged to a tg.
*/
- if (!bio_flagged(bio, BIO_THROTTLED))
- bio_set_flag(bio, BIO_THROTTLED);
+ bio->bi_throttled_disk = bio->bi_disk;
}
/**
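
throtl_charge_bio() now stamps the bio with the disk it is charged against instead of setting a one-shot flag. Since bio_init() zeroes the bio, bi_throttled_disk starts out NULL and a fresh bio can never match its own bi_disk. A minimal model of the charge step, again with hypothetical stand-in types:

    struct gendisk;
    struct bio { struct gendisk *bi_disk, *bi_throttled_disk; };

    /* Models the new body of throtl_charge_bio(): remember which disk
     * this bio has been charged against. */
    static inline void charge_bio(struct bio *bio)
    {
            bio->bi_throttled_disk = bio->bi_disk;
    }
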
@@ -2131,8 +2130,11 @@ bool blk_throtl_bio(struct request_queue
WARN_ON_ONCE(!rcu_read_lock_held());
- /* see throtl_charge_bio() */
- if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
+ /*
+ * See throtl_charge_bio(). If a bio was throttled against one disk
+ * but is then remapped to another disk, it should be throttled again.
+ */
+ if (bio->bi_throttled_disk == bio->bi_disk || !tg->has_rules[rw])
goto out;
spin_lock_irq(q->queue_lock);
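
The replacement check compares the recorded disk with the current one, so three cases fall out naturally: a fresh bio (NULL marker) is throttled, a charged bio re-entering blk-throtl on the same disk is skipped, and a bio remapped to a different disk is throttled again. A sketch of the condition, with a hypothetical helper name:

    struct gendisk;
    struct bio { struct gendisk *bi_disk, *bi_throttled_disk; };

    /* Models the new test in blk_throtl_bio(): skip throttling only when
     * the bio was already charged against the disk it is being issued to. */
    static inline int bio_already_throttled(const struct bio *bio)
    {
            return bio->bi_throttled_disk == bio->bi_disk;
    }
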
@@ -2223,14 +2225,6 @@ again:
out_unlock:
spin_unlock_irq(q->queue_lock);
out:
- /*
- * As multiple blk-throtls may stack in the same issue path, we
- * don't want bios to leave with the flag set. Clear the flag if
- * being issued.
- */
- if (!throttled)
- bio_clear_flag(bio, BIO_THROTTLED);
-
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
if (throttled || !td->track_bio_latency)
bio->bi_issue_stat.stat |= SKIP_LATENCY;
@@ -90,6 +90,10 @@ struct bio {
void *bi_cg_private;
struct blk_issue_stat bi_issue_stat;
#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING
+ /* record which disk the bio is throttled against */
+ struct gendisk *bi_throttled_disk;
+#endif
#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
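
Taken together, the per-disk marker also explains why the clear-on-issue logic removed above is no longer needed: the marker only suppresses throttling on the one disk it names, so stacked blk-throtl instances in the same issue path no longer have to reset anything. A self-contained userspace demo of the intended behavior (all types and helper names are stand-ins, not kernel API):

    #include <assert.h>
    #include <stddef.h>

    struct gendisk { int id; };

    struct bio {
            struct gendisk *bi_disk;
            struct gendisk *bi_throttled_disk; /* NULL on fresh allocation */
    };

    /* Models throtl_charge_bio(): remember the disk charged against. */
    static void charge(struct bio *bio)
    {
            bio->bi_throttled_disk = bio->bi_disk;
    }

    /* Models the new check in blk_throtl_bio(). */
    static int should_skip_throttle(const struct bio *bio)
    {
            return bio->bi_throttled_disk == bio->bi_disk;
    }

    int main(void)
    {
            struct gendisk dm = { 1 }, sda = { 2 };
            struct bio bio = { .bi_disk = &dm, .bi_throttled_disk = NULL };

            assert(!should_skip_throttle(&bio)); /* first pass: throttle */
            charge(&bio);
            assert(should_skip_throttle(&bio));  /* same disk again: skip */

            bio.bi_disk = &sda;                  /* remapped to lower device */
            assert(!should_skip_throttle(&bio)); /* new disk: throttle again */
            charge(&bio);
            assert(should_skip_throttle(&bio));
            return 0;
    }
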