@@ -98,22 +98,12 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info)
*/
struct btrfs_bio_ctrl {
struct btrfs_bio *bbio;
- int mirror_num;
enum btrfs_compression_type compress_type;
u32 len_to_oe_boundary;
blk_opf_t opf;
btrfs_bio_end_io_t end_io_func;
struct writeback_control *wbc;
- /*
- * This is for metadata read, to provide the extra needed verification
- * info. This has to be provided for submit_one_bio(), as
- * submit_one_bio() can submit a bio if it ends at stripe boundary. If
- * no such parent_check is provided, the metadata can hit false alert at
- * endio time.
- */
- struct btrfs_tree_parent_check *parent_check;
-
/*
* Tell writepage not to lock the state bits for this range, it still
* does the unlocking.
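With metadata reads now building their own btrfs_bio (see the new __read_extent_buffer_pages() helper further down), btrfs_bio_ctrl only ever drives data I/O and metadata writes, so mirror_num and parent_check have no remaining users here. As a reminder of what that verification info contains, here is a rough sketch of struct btrfs_tree_parent_check as I remember it from the 6.2-era tree; the field comments are mine and the exact layout may differ from the source:

/* Rough sketch from memory, not quoted from the tree. */
struct btrfs_tree_parent_check {
	u64 owner_root;			/* expected owning tree, 0 skips the check */
	u64 transid;			/* expected generation, 0 skips the check */
	struct btrfs_key first_key;	/* expected first key of the block */
	bool has_first_key;		/* whether first_key should be checked */
	u8 level;			/* expected level, always checked */
};

The end-io verification compares the tree block it just read against these expected values, which is why the check has to travel with the bio itself rather than with per-submission state.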
@@ -124,7 +114,6 @@ struct btrfs_bio_ctrl {
static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
{
struct btrfs_bio *bbio = bio_ctrl->bbio;
- int mirror_num = bio_ctrl->mirror_num;
if (!bbio)
return;
@@ -132,25 +121,14 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
/* Caller should ensure the bio has at least some range added */
ASSERT(bbio->bio.bi_iter.bi_size);
- if (!is_data_inode(&bbio->inode->vfs_inode)) {
- if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE) {
- /*
- * For metadata read, we should have the parent_check,
- * and copy it to bbio for metadata verification.
- */
- ASSERT(bio_ctrl->parent_check);
- memcpy(&bbio->parent_check,
- bio_ctrl->parent_check,
- sizeof(struct btrfs_tree_parent_check));
- }
+ if (!is_data_inode(&bbio->inode->vfs_inode))
bbio->bio.bi_opf |= REQ_META;
- }
if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
- btrfs_submit_compressed_read(bbio, mirror_num);
+ btrfs_submit_compressed_read(bbio, 0);
else
- btrfs_submit_bio(bbio, mirror_num);
+ btrfs_submit_bio(bbio, 0);
/* The bbio is owned by the end_io handler now */
bio_ctrl->bbio = NULL;
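With the extent buffer path gone, every bio that still reaches submit_one_bio() starts at mirror 0, i.e. "read any copy"; as far as I recall, picking a specific mirror for data only happens inside the btrfs_bio layer when a read needs repair, so nothing above it has to pass a non-zero mirror here anymore. For context, the is_data_inode() check that gates REQ_META is roughly the following; treat it as a sketch from memory rather than the exact helper:

/* Sketch from memory of btrfs_inode.h: everything except the btree inode is data. */
static inline bool is_data_inode(struct inode *inode)
{
	return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}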
@@ -4242,6 +4220,36 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb)
}
}
+static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num,
+ struct btrfs_tree_parent_check *check)
+{
+ int num_pages = num_extent_pages(eb), i;
+ struct btrfs_bio *bbio;
+ struct btrfs_bio *bbio;
+
+ clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
+ eb->read_mirror = 0;
+ atomic_set(&eb->io_pages, num_pages);
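+ /*
+  * It is possible for release_folio to clear the TREE_REF bit before we
+  * set io_pages. See check_buffer_tree_ref for a more detailed comment.
+  */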
+ check_buffer_tree_ref(eb);
+
+ bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
+ REQ_OP_READ | REQ_META,
+ BTRFS_I(eb->fs_info->btree_inode),
+ end_bio_extent_readpage, NULL);
+ bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT;
+ bbio->file_offset = eb->start;
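+ /*
+  * Metadata reads need this extra verification info at endio time;
+  * without it the read could hit a false alert, so copy it into the
+  * bbio before submitting.
+  */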
+ memcpy(&bbio->parent_check, check, sizeof(*check));
+ if (eb->fs_info->nodesize < PAGE_SIZE) {
+ __bio_add_page(&bbio->bio, eb->pages[0], eb->len,
+ eb->start - page_offset(eb->pages[0]));
+ } else {
+ for (i = 0; i < num_pages; i++) {
+ ClearPageError(eb->pages[i]);
+ __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0);
+ }
+ }
+ btrfs_submit_bio(bbio, mirror_num);
+}
+
static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
int mirror_num,
struct btrfs_tree_parent_check *check)
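The helper issues a single btrfs_bio covering the whole buffer and hands mirror_num straight to btrfs_submit_bio(), so trying another copy is entirely the caller's business. A minimal sketch of such a caller, assuming only the read_extent_buffer_pages() signature visible in the hunks below; read_tree_block_sketch() is a made-up name and the loop is an illustration, not the actual disk-io.c retry logic:

/*
 * Illustration only: try mirror 0 ("any copy") first, then walk the
 * remaining copies until one passes the end-io parent_check verification.
 */
static int read_tree_block_sketch(struct extent_buffer *eb,
				  struct btrfs_tree_parent_check *check)
{
	int num_copies = btrfs_num_copies(eb->fs_info, eb->start, eb->len);
	int mirror_num = 0;
	int ret;

	do {
		ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check);
		if (!ret)
			return 0;
		mirror_num++;
	} while (mirror_num <= num_copies);

	return ret;
}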
@@ -4250,11 +4258,6 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
struct extent_io_tree *io_tree;
struct page *page = eb->pages[0];
struct extent_state *cached_state = NULL;
- struct btrfs_bio_ctrl bio_ctrl = {
- .opf = REQ_OP_READ,
- .mirror_num = mirror_num,
- .parent_check = check,
- };
int ret;
ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags));
@@ -4282,18 +4285,10 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
return 0;
}
- clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- eb->read_mirror = 0;
- atomic_set(&eb->io_pages, 1);
- check_buffer_tree_ref(eb);
- bio_ctrl.end_io_func = end_bio_extent_readpage;
-
btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len);
-
btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len);
- submit_extent_page(&bio_ctrl, eb->start, page, eb->len,
- eb->start - page_offset(page));
- submit_one_bio(&bio_ctrl);
+
+ __read_extent_buffer_pages(eb, mirror_num, check);
if (wait != WAIT_COMPLETE) {
free_extent_state(cached_state);
return 0;
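In the subpage case (nodesize < PAGE_SIZE) the whole tree block sits inside one page, which is why the helper above gets away with a single __bio_add_page() of eb->len bytes at offset eb->start - page_offset(page). A standalone sanity check of that arithmetic, with made-up example values (64K pages, 16K nodesize); this is plain userspace C, not kernel code:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint64_t page_size = 64 * 1024;	/* e.g. 64K pages, made-up example */
	const uint64_t nodesize  = 16 * 1024;	/* subpage: nodesize < page_size */
	const uint64_t eb_start  = 3 * page_size + 2 * nodesize;	/* nodesize-aligned example */
	const uint64_t page_start = eb_start & ~(page_size - 1);	/* page_offset(page) */
	const uint64_t off_in_page = eb_start - page_start;	/* offset handed to __bio_add_page() */

	assert(off_in_page == 2 * nodesize);
	assert(off_in_page + nodesize <= page_size);	/* the block never crosses the page */
	return 0;
}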
@@ -4314,11 +4309,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
int locked_pages = 0;
int all_uptodate = 1;
int num_pages;
- struct btrfs_bio_ctrl bio_ctrl = {
- .opf = REQ_OP_READ,
- .mirror_num = mirror_num,
- .parent_check = check,
- };
if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
@@ -4368,24 +4358,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
goto unlock_exit;
}
- clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags);
- eb->read_mirror = 0;
- atomic_set(&eb->io_pages, num_pages);
- /*
- * It is possible for release_folio to clear the TREE_REF bit before we
- * set io_pages. See check_buffer_tree_ref for a more detailed comment.
- */
- check_buffer_tree_ref(eb);
- bio_ctrl.end_io_func = end_bio_extent_readpage;
- for (i = 0; i < num_pages; i++) {
- page = eb->pages[i];
-
- ClearPageError(page);
- submit_extent_page(&bio_ctrl, page_offset(page), page,
- PAGE_SIZE, 0);
- }
-
- submit_one_bio(&bio_ctrl);
+ __read_extent_buffer_pages(eb, mirror_num, check);
if (wait != WAIT_COMPLETE)
return 0;