@@ -2383,6 +2383,31 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
return rbio;
}
+/*
+ * Allocate a recovery rbio out of the regular btrfs_submit_bio() path.
+ *
+ * This allows the scrub caller to use cached pages to reduce IO.
+ */
+struct btrfs_raid_bio *raid56_parity_alloc_recover_rbio(struct bio *bio,
+ struct btrfs_io_context *bioc, int mirror_num)
+{
+ struct btrfs_fs_info *fs_info = bioc->fs_info;
+ struct btrfs_raid_bio *rbio;
+
+ rbio = alloc_rbio(fs_info, bioc);
+ if (IS_ERR(rbio))
+ return NULL;
+ /* We should have some sectors that really need to be recovered. */
+ ASSERT(bio->bi_iter.bi_size);
+ bio_list_add(&rbio->bio_list, bio);
+ set_rbio_range_error(rbio, bio);
+ rbio->operation = BTRFS_RBIO_READ_REBUILD;
+ if (mirror_num > 2)
+ set_rbio_raid6_extra_error(rbio, mirror_num);
+
+ return rbio;
+}
+
/*
* We just scrub the parity that we have correct data on the same horizontal,
* so we needn't allocate all pages for all the stripes.
@@ -2771,6 +2796,16 @@ void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
 start_async_work(rbio, scrub_rbio_work_locked);
 }
+/*
+ * Submit a recovery rbio allocated by raid56_parity_alloc_recover_rbio().
+ *
+ * Counterpart of raid56_parity_submit_scrub_rbio() for the recovery path.
+ */
+void raid56_parity_submit_recover_rbio(struct btrfs_raid_bio *rbio)
+{
+ if (!lock_stripe_add(rbio))
+ start_async_work(rbio, recover_rbio_work_locked);
+}
+
/*
* This is for scrub call sites where we already have correct stripe contents.
* This allows us to avoid reading on-disk stripes again.
@@ -197,7 +197,10 @@ struct btrfs_raid_bio *raid56_parity_alloc_scrub_rbio(struct bio *bio,
struct btrfs_io_context *bioc,
struct btrfs_device *scrub_dev,
unsigned long *dbitmap);
+struct btrfs_raid_bio *raid56_parity_alloc_recover_rbio(struct bio *bio,
+ struct btrfs_io_context *bioc, int mirror_num);
void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
+void raid56_parity_submit_recover_rbio(struct btrfs_raid_bio *rbio);
void raid56_parity_cache_pages(struct btrfs_raid_bio *rbio, struct page **pages,
int stripe_num);
The incoming scrub_logical needs to recover raid56 data sectors with cached pages. This means we cannot go through the regular btrfs_submit_bio() path, but must instead go through a path similar to raid56_parity_alloc_scrub_rbio(). So this patch adds the following new functions to allow a recover rbio to be allocated and submitted outside of the btrfs_submit_bio() path: - raid56_parity_alloc_recover_rbio() - raid56_parity_submit_recover_rbio() This means we can now do a fully cached recovery without reading any pages from disk. Signed-off-by: Qu Wenruo <wqu@suse.com> --- fs/btrfs/raid56.c | 31 +++++++++++++++++++++++++++++++ fs/btrfs/raid56.h | 3 +++ 2 files changed, 34 insertions(+)