@@ -48,15 +48,17 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
* Just like the underlying bio_alloc_bioset it will not fail as it is backed by
* a mempool.
*/
-struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
- struct btrfs_inode *inode,
- btrfs_bio_end_io_t end_io, void *private)
+struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ struct btrfs_inode *inode,
+ btrfs_bio_end_io_t end_io, void *private)
{
+ struct btrfs_bio *bbio;
struct bio *bio;

bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
- btrfs_bio_init(btrfs_bio(bio), inode, end_io, private);
- return bio;
+ bbio = btrfs_bio(bio);
+ btrfs_bio_init(bbio, inode, end_io, private);
+ return bbio;
}

static struct bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
@@ -75,9 +75,9 @@ void __cold btrfs_bioset_exit(void);
void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_inode *inode,
btrfs_bio_end_io_t end_io, void *private);
-struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
- struct btrfs_inode *inode,
- btrfs_bio_end_io_t end_io, void *private);
+struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
+ struct btrfs_inode *inode,
+ btrfs_bio_end_io_t end_io, void *private);

static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
@@ -896,13 +896,13 @@ static void alloc_new_bio(struct btrfs_inode *inode,
u64 disk_bytenr, u64 file_offset)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct bio *bio;
+ struct btrfs_bio *bbio;

- bio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode,
- bio_ctrl->end_io_func, NULL);
- bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
- btrfs_bio(bio)->file_offset = file_offset;
- bio_ctrl->bbio = btrfs_bio(bio);
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, bio_ctrl->opf, inode,
+ bio_ctrl->end_io_func, NULL);
+ bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ bbio->file_offset = file_offset;
+ bio_ctrl->bbio = bbio;
bio_ctrl->len_to_oe_boundary = U32_MAX;

/*
@@ -911,7 +911,7 @@ static void alloc_new_bio(struct btrfs_inode *inode,
* them.
*/
if (bio_ctrl->compress_type == BTRFS_COMPRESS_NONE &&
- btrfs_use_zone_append(btrfs_bio(bio))) {
+ btrfs_use_zone_append(bbio)) {
struct btrfs_ordered_extent *ordered;

ordered = btrfs_lookup_ordered_extent(inode, file_offset);
@@ -930,8 +930,8 @@ static void alloc_new_bio(struct btrfs_inode *inode,
* to always be set on the last added/replaced device.
* This is a bit odd but has been like that for a long time.
*/
- bio_set_dev(bio, fs_info->fs_devices->latest_dev->bdev);
- wbc_init_bio(bio_ctrl->wbc, bio);
+ bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev);
+ wbc_init_bio(bio_ctrl->wbc, &bbio->bio);
}
}
@@ -9959,24 +9959,24 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
.pending = ATOMIC_INIT(1),
};
unsigned long i = 0;
- struct bio *bio;
+ struct btrfs_bio *bbio;

init_waitqueue_head(&priv.wait);

- bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode,
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode,
btrfs_encoded_read_endio, &priv);
- bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;

do {
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);

- if (bio_add_page(bio, pages[i], bytes, 0) < bytes) {
+ if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
atomic_inc(&priv.pending);
- btrfs_submit_bio(btrfs_bio(bio), 0);
+ btrfs_submit_bio(bbio, 0);

- bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode,
- btrfs_encoded_read_endio, &priv);
- bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
+ bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode,
+ btrfs_encoded_read_endio, &priv);
+ bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
continue;
}
@@ -9986,7 +9986,7 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
} while (disk_io_size);

atomic_inc(&priv.pending);
- btrfs_submit_bio(btrfs_bio(bio), 0);
+ btrfs_submit_bio(bbio, 0);

if (atomic_dec_return(&priv.pending))
io_wait_event(priv.wait, !atomic_read(&priv.pending));
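
For review context, a minimal caller-side sketch of the new contract follows. It is not part of the patch: the helper name demo_encoded_read_page and its parameters are hypothetical, while every btrfs identifier it touches (btrfs_bio_alloc, btrfs_submit_bio, bbio->bio, BIO_MAX_VECS) comes from the hunks above. Because btrfs_bio_alloc is backed by a mempool and cannot fail, the returned btrfs_bio is used directly without a NULL check, and the embedded bio is reached through bbio->bio:

	/* Illustrative sketch only; not code introduced by this patch. */
	static void demo_encoded_read_page(struct btrfs_inode *inode, struct page *page,
					   u64 disk_bytenr, btrfs_bio_end_io_t end_io,
					   void *private)
	{
		struct btrfs_bio *bbio;

		/* The allocator now hands back the btrfs_bio itself. */
		bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, inode, end_io, private);

		/* The embedded struct bio is reached through bbio->bio. */
		bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;

		/* A single page always fits a freshly allocated BIO_MAX_VECS bio. */
		bio_add_page(&bbio->bio, page, PAGE_SIZE, 0);

		btrfs_submit_bio(bbio, 0);
	}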