Message ID | 20210126145247.1964410-15-hch@lst.de (mailing list archive)
---|---
State | New, archived
Series | [01/17] zonefs: use bio_alloc in zonefs_file_dio_append
On Tue, Jan 26, 2021 at 7:19 AM Christoph Hellwig <hch@lst.de> wrote:
>
> Refactor raid5_read_one_chunk so that all simple checks are done
> before allocating the bio.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>

Acked-by: Song Liu <song@kernel.org>

Thanks for the clean-up!

> ---
>  drivers/md/raid5.c | 108 +++++++++++++++++++--------------------------
>  1 file changed, 45 insertions(+), 63 deletions(-)

[...]
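The quoted commit message captures the whole point of the change: run every check that can fail before paying for the bio allocation, so that no early-exit path is left holding a bio to free. A minimal userspace C sketch of that ordering discipline (the request type, field names, and helper are hypothetical stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical request type, standing in for struct bio. */
struct request {
        long sector;
        long nr_sectors;
};

/* Run every cheap validity check before allocating the clone, so each
 * early exit is a plain return with nothing to free. */
struct request *clone_if_aligned(const struct request *req, long chunk_sectors)
{
        struct request *clone;

        /* Simple checks first: no resources are held yet. */
        if (req->nr_sectors <= 0)
                return NULL;
        if (req->sector / chunk_sectors !=
            (req->sector + req->nr_sectors - 1) / chunk_sectors)
                return NULL;    /* crosses a chunk boundary, as in in_chunk_boundary() */

        /* Only now pay for the allocation. */
        clone = malloc(sizeof(*clone));
        if (clone)
                memcpy(clone, req, sizeof(*clone));
        return clone;
}

int main(void)
{
        struct request req = { .sector = 8, .nr_sectors = 8 };
        struct request *clone = clone_if_aligned(&req, 64);

        printf("clone %s\n", clone ? "created" : "rejected");
        free(clone);
        return 0;
}

The old code allocated the clone first and then had to pair every rejection with a bio_put(); after the reordering, cleanup is only needed on the genuinely late failure path, as the diff below shows.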
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f411b9e5c332f4..a348b2adf2a9f9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5393,90 +5393,72 @@ static void raid5_align_endio(struct bio *bi)
 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 {
        struct r5conf *conf = mddev->private;
-       int dd_idx;
-       struct bio* align_bi;
+       struct bio *align_bio;
        struct md_rdev *rdev;
-       sector_t end_sector;
+       sector_t sector, end_sector, first_bad;
+       int bad_sectors, dd_idx;

        if (!in_chunk_boundary(mddev, raid_bio)) {
                pr_debug("%s: non aligned\n", __func__);
                return 0;
        }
-       /*
-        * use bio_clone_fast to make a copy of the bio
-        */
-       align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
-       if (!align_bi)
-               return 0;
-       /*
-        * set bi_end_io to a new function, and set bi_private to the
-        * original bio.
-        */
-       align_bi->bi_end_io = raid5_align_endio;
-       align_bi->bi_private = raid_bio;
-       /*
-        * compute position
-        */
-       align_bi->bi_iter.bi_sector =
-               raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
-                                    0, &dd_idx, NULL);

-       end_sector = bio_end_sector(align_bi);
+       sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
+                                     &dd_idx, NULL);
+       end_sector = bio_end_sector(raid_bio);
+
        rcu_read_lock();
+       if (r5c_big_stripe_cached(conf, sector))
+               goto out_rcu_unlock;
+
        rdev = rcu_dereference(conf->disks[dd_idx].replacement);
        if (!rdev || test_bit(Faulty, &rdev->flags) ||
            rdev->recovery_offset < end_sector) {
                rdev = rcu_dereference(conf->disks[dd_idx].rdev);
-               if (rdev &&
-                   (test_bit(Faulty, &rdev->flags) ||
+               if (!rdev)
+                       goto out_rcu_unlock;
+               if (test_bit(Faulty, &rdev->flags) ||
                    !(test_bit(In_sync, &rdev->flags) ||
-                     rdev->recovery_offset >= end_sector)))
-                       rdev = NULL;
+                     rdev->recovery_offset >= end_sector))
+                       goto out_rcu_unlock;
        }

-       if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) {
-               rcu_read_unlock();
-               bio_put(align_bi);
+       atomic_inc(&rdev->nr_pending);
+       rcu_read_unlock();
+
+       align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set);
+       bio_set_dev(align_bio, rdev->bdev);
+       align_bio->bi_end_io = raid5_align_endio;
+       align_bio->bi_private = raid_bio;
+       align_bio->bi_iter.bi_sector = sector;
+
+       raid_bio->bi_next = (void *)rdev;
+
+       if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad,
+                       &bad_sectors)) {
+               bio_put(align_bio);
+               rdev_dec_pending(rdev, mddev);
                return 0;
        }

-       if (rdev) {
-               sector_t first_bad;
-               int bad_sectors;
-
-               atomic_inc(&rdev->nr_pending);
-               rcu_read_unlock();
-               raid_bio->bi_next = (void*)rdev;
-               bio_set_dev(align_bi, rdev->bdev);
-
-               if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
-                               bio_sectors(align_bi),
-                               &first_bad, &bad_sectors)) {
-                       bio_put(align_bi);
-                       rdev_dec_pending(rdev, mddev);
-                       return 0;
-               }
+       /* No reshape active, so we can trust rdev->data_offset */
+       align_bio->bi_iter.bi_sector += rdev->data_offset;

-               /* No reshape active, so we can trust rdev->data_offset */
-               align_bi->bi_iter.bi_sector += rdev->data_offset;
+       spin_lock_irq(&conf->device_lock);
+       wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0,
+                           conf->device_lock);
+       atomic_inc(&conf->active_aligned_reads);
+       spin_unlock_irq(&conf->device_lock);

-               spin_lock_irq(&conf->device_lock);
-               wait_event_lock_irq(conf->wait_for_quiescent,
-                                   conf->quiesce == 0,
-                                   conf->device_lock);
-               atomic_inc(&conf->active_aligned_reads);
-               spin_unlock_irq(&conf->device_lock);
+       if (mddev->gendisk)
+               trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk),
+                                     raid_bio->bi_iter.bi_sector);
+       submit_bio_noacct(align_bio);
+       return 1;

-               if (mddev->gendisk)
-                       trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
-                                             raid_bio->bi_iter.bi_sector);
-               submit_bio_noacct(align_bi);
-               return 1;
-       } else {
-               rcu_read_unlock();
-               bio_put(align_bi);
-               return 0;
-       }
+out_rcu_unlock:
+       rcu_read_unlock();
+       return 0;
 }

 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio)
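Beyond the reordering, the rewritten lookup pins the chosen device with atomic_inc(&rdev->nr_pending) while still under rcu_read_lock(), and funnels every failed check through the single out_rcu_unlock label instead of duplicating the unlock and bio_put() in each branch. A rough userspace analogue of that discipline, with a pthread rwlock standing in for RCU and all names hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical device record, standing in for struct md_rdev. */
struct dev_ref {
        atomic_int nr_pending;
        bool faulty;
};

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Validate under the read lock, pin with a refcount before dropping
 * it, and send every failure through one unlock label. */
static struct dev_ref *pin_device(struct dev_ref *dev)
{
        pthread_rwlock_rdlock(&table_lock);
        if (dev == NULL || dev->faulty)
                goto out_unlock;

        /* Pin before unlocking so the device cannot go away under us. */
        atomic_fetch_add(&dev->nr_pending, 1);
        pthread_rwlock_unlock(&table_lock);
        return dev;

out_unlock:
        pthread_rwlock_unlock(&table_lock);
        return NULL;
}

int main(void)
{
        struct dev_ref dev = { .nr_pending = 0, .faulty = false };
        return pin_device(&dev) ? 0 : 1;
}

The ordering is what matters: the reference is taken before the read lock is dropped, so the device cannot disappear between the check and the use.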
Refactor raid5_read_one_chunk so that all simple checks are done
before allocating the bio.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/md/raid5.c | 108 +++++++++++++++++++--------------------------
 1 file changed, 45 insertions(+), 63 deletions(-)
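The quiesce gate the patch keeps (only reindented) is a classic condition wait: wait_event_lock_irq() sleeps until conf->quiesce is clear, and active_aligned_reads is bumped under the same device_lock, so a concurrent quiesce either blocks the read or sees it counted. A condition-variable analogue in userspace C, with hypothetical names:

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t unquiesced = PTHREAD_COND_INITIALIZER;
static int quiesce;
static int active_aligned_reads;

/* Analogue of the wait_event_lock_irq() block: sleep until quiesce is
 * clear, then register the in-flight read under the same lock. */
void begin_aligned_read(void)
{
        pthread_mutex_lock(&lock);
        while (quiesce)
                pthread_cond_wait(&unquiesced, &lock);
        active_aligned_reads++;
        pthread_mutex_unlock(&lock);
}

/* Analogue of the md quiesce toggle: clearing it wakes all waiters. */
void set_quiesce(int on)
{
        pthread_mutex_lock(&lock);
        quiesce = on;
        if (!on)
                pthread_cond_broadcast(&unquiesced);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        begin_aligned_read();   /* quiesce starts clear, so this returns at once */
        return 0;
}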