diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -601,10 +601,21 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
int btrfs_writepage_cow_fixup(struct page *page);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
+typedef void (btrfs_encoded_read_cb_t)(void *ctx, int err);
+
+struct btrfs_encoded_read_wait_ctx {
+ wait_queue_head_t wait;
+ bool done;
+ int err;
+};
+
+void btrfs_encoded_read_wait_cb(void *ctx, int err);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
u64 disk_io_size,
- struct page **pages);
+ struct page **pages,
+ btrfs_encoded_read_cb_t cb,
+ void *cb_ctx);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
struct btrfs_ioctl_encoded_io_args *encoded);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9080,9 +9080,10 @@ static ssize_t btrfs_encoded_read_inline(
}
struct btrfs_encoded_read_private {
- wait_queue_head_t wait;
atomic_t pending;
blk_status_t status;
+ btrfs_encoded_read_cb_t *cb;
+ void *cb_ctx;
};
static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
@@ -9100,26 +9101,35 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
- if (!atomic_dec_return(&priv->pending))
- wake_up(&priv->wait);
+ if (!atomic_dec_return(&priv->pending)) {
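+ /* This was the last outstanding bio: report the status and free priv. */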
+ priv->cb(priv->cb_ctx,
+ blk_status_to_errno(READ_ONCE(priv->status)));
+ kfree(priv);
+ }
bio_put(&bbio->bio);
}
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
- u64 disk_io_size, struct page **pages)
+ u64 disk_io_size, struct page **pages,
+ btrfs_encoded_read_cb_t cb,
+ void *cb_ctx)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_encoded_read_private priv = {
- .pending = ATOMIC_INIT(1),
- };
+ struct btrfs_encoded_read_private *priv;
unsigned long i = 0;
struct btrfs_bio *bbio;
- init_waitqueue_head(&priv.wait);
+ priv = kzalloc(sizeof(*priv), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
+
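+ /* Bias pending so the callback cannot fire before submission finishes. */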
+ atomic_set(&priv->pending, 1);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
- btrfs_encoded_read_endio, &priv);
+ btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
@@ -9127,11 +9137,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
- atomic_inc(&priv.pending);
+ atomic_inc(&priv->pending);
btrfs_submit_bio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
- btrfs_encoded_read_endio, &priv);
+ btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
continue;
@@ -9142,13 +9152,37 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
disk_io_size -= bytes;
} while (disk_io_size);
- atomic_inc(&priv.pending);
+ atomic_inc(&priv->pending);
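+ /*
+ * The bias taken at allocation keeps pending nonzero until the final
+ * dec below, so the endio cannot use cb/cb_ctx before they are set.
+ */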
+ priv->cb = cb;
+ priv->cb_ctx = cb_ctx;
+
btrfs_submit_bio(bbio, 0);
- if (atomic_dec_return(&priv.pending))
- io_wait_event(priv.wait, !atomic_read(&priv.pending));
- /* See btrfs_encoded_read_endio() for ordering. */
- return blk_status_to_errno(READ_ONCE(priv.status));
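+ /* Drop our ref; if every bio has already completed, finish inline. */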
+ if (!atomic_dec_return(&priv->pending)) {
+ cb(cb_ctx, blk_status_to_errno(READ_ONCE(priv->status)));
+ kfree(priv);
+ }
+
+ return 0;
+}
+
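+/*
+ * Completion callback that matches the old synchronous behaviour: record
+ * the result and wake the thread sleeping in io_wait_event().
+ */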
+void btrfs_encoded_read_wait_cb(void *ctx, int err)
+{
+ struct btrfs_encoded_read_wait_ctx *priv = ctx;
+
+ priv->err = err;
+ priv->done = true;
+
+ wake_up(&priv->wait);
}
static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
@@ -9166,6 +9200,7 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
u64 cur;
size_t page_offset;
ssize_t ret;
+ struct btrfs_encoded_read_wait_ctx wait_ctx;
nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
@@ -9177,11 +9212,23 @@
goto out;
}
+ wait_ctx.done = false;
+ init_waitqueue_head(&wait_ctx.wait);
+
ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
- disk_io_size, pages);
+ disk_io_size, pages,
+ btrfs_encoded_read_wait_cb,
+ &wait_ctx);
if (ret)
goto out;
+ io_wait_event(wait_ctx.wait, wait_ctx.done);
+
+ if (wait_ctx.err) {
+ ret = wait_ctx.err;
+ goto out;
+ }
+
unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5613,6 +5613,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
u64 disk_bytenr, disk_num_bytes;
u32 data_offset;
struct btrfs_cmd_header *hdr;
+ struct btrfs_encoded_read_wait_ctx wait_ctx;
u32 crc;
int ret;
@@ -5671,6 +5672,9 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
goto out;
}
+ wait_ctx.done = false;
+ init_waitqueue_head(&wait_ctx.wait);
+
/*
* Note that send_buf is a mapping of send_buf_pages, so this is really
* reading into send_buf.
@@ -5678,10 +5682,19 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
disk_bytenr, disk_num_bytes,
sctx->send_buf_pages +
- (data_offset >> PAGE_SHIFT));
+ (data_offset >> PAGE_SHIFT),
+ btrfs_encoded_read_wait_cb,
+ &wait_ctx);
if (ret)
goto out;
+ io_wait_event(wait_ctx.wait, wait_ctx.done);
+
+ if (wait_ctx.err) {
+ ret = wait_ctx.err;
+ goto out;
+ }
+
hdr = (struct btrfs_cmd_header *)sctx->send_buf;
hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
hdr->crc = 0;
Change btrfs_encoded_read_regular_fill_pages so that it takes a callback
rather than waiting, and add a new helper function,
btrfs_encoded_read_wait_cb, to match the existing synchronous behaviour.

Signed-off-by: Mark Harmstone <maharmstone@fb.com>
---
 fs/btrfs/btrfs_inode.h | 13 +++++++-
 fs/btrfs/inode.c       | 81 +++++++++++++++++++++++++++++++++++----------
 fs/btrfs/send.c        | 15 ++++++++-
 3 files changed, 90 insertions(+), 19 deletions(-)
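For reference, a minimal sketch of how an asynchronous caller might consume
the new interface. The my_read_ctx structure and my_encoded_read_done
callback below are hypothetical illustrations, not part of this patch; a
synchronous caller would simply keep using btrfs_encoded_read_wait_cb:

	/*
	 * Hypothetical caller-side context. The callback may run from bio
	 * completion (irq) context, so complete() is a safe way to signal.
	 */
	struct my_read_ctx {
		struct completion done;
		int ret;
	};

	static void my_encoded_read_done(void *ctx, int err)
	{
		struct my_read_ctx *rctx = ctx;

		rctx->ret = err;
		complete(&rctx->done);
	}

	...
	struct my_read_ctx rctx;

	init_completion(&rctx.done);
	ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
						    disk_io_size, pages,
						    my_encoded_read_done,
						    &rctx);
	if (ret)
		goto out;
	/* Other submit-side work can happen here before waiting. */
	wait_for_completion(&rctx.done);
	ret = rctx.ret;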