Message ID | 20191213195750.32184-5-rgoldwyn@suse.de (mailing list archive) |
---|---|
State | New, archived |
Series | btrfs direct-io using iomap |
So I looked into the "unlocked" direct I/O case, and I think the current code using dio_sem is really sketchy. What btrfs really needs to do is take i_rwsem shared by default for direct writes, and only upgrade to the exclusive lock when needed, similar to xfs and the WIP ext4 code.

While looking for that I also noticed two other things:

 - check_direct_IO looks pretty bogus
 - btrfs_direct_IO really should be split and folded into the two callers

Untested patches attached. The first should probably go into a prep patch, and the second could be folded into this one.

From bc285e440a50140beb456f11e545a049bdf51ec1 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Sat, 21 Dec 2019 15:17:26 +0100
Subject: btrfs: remove direct I/O alignment checks

The direct I/O code itself already checks for the proper sector size alignment, so remove the duplicate checks. The remainder of check_direct_IO is only needed for reads and can be moved to file.c and outside of i_rwsem.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/file.c  | 34 +++++++++++++++++++++++++++-------
 fs/btrfs/inode.c | 37 -------------------------------------
 2 files changed, 27 insertions(+), 44 deletions(-)

diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a6d41d7bf362..0522f6d45a98 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -3444,21 +3444,41 @@ static int btrfs_file_open(struct inode *inode, struct file *filp)
 	return generic_file_open(inode, filp);
 }
 
-static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+/*
+ * If there are duplicate iov_base's in this iovec, fall back to buffered I/O
+ * to avoid checksum errors.
+ */
+static bool btrfs_direct_read_ok(struct kiocb *iocb, struct iov_iter *iter)
 {
-	ssize_t ret = 0;
+	int seg, i;
 
-	if (iocb->ki_flags & IOCB_DIRECT) {
+	if (!iter_is_iovec(iter))
+		return true;
+
+	for (seg = 0; seg < iter->nr_segs; seg++) {
+		for (i = seg + 1; i < iter->nr_segs; i++) {
+			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
+				return false;
+		}
+	}
+
+	return true;
+}
+
+
+static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+	if ((iocb->ki_flags & IOCB_DIRECT) && btrfs_direct_read_ok(iocb, to)) {
 		struct inode *inode = file_inode(iocb->ki_filp);
+		ssize_t ret;
 
 		inode_lock_shared(inode);
 		ret = btrfs_direct_IO(iocb, to);
 		inode_unlock_shared(inode);
-		if (ret < 0)
-			return ret;
-	}
 
-	return generic_file_buffered_read(iocb, to, ret);
+		return ret;
+	}
+	return generic_file_buffered_read(iocb, to, 0);
 }
 
 const struct file_operations btrfs_file_operations = {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 824f318cee5e..18d153a62655 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8581,39 +8581,6 @@ static blk_qc_t btrfs_submit_direct(struct bio *dio_bio, struct file *file,
 	return BLK_QC_T_NONE;
 }
 
-static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
-			       const struct iov_iter *iter, loff_t offset)
-{
-	int seg;
-	int i;
-	unsigned int blocksize_mask = fs_info->sectorsize - 1;
-	ssize_t retval = -EINVAL;
-
-	if (offset & blocksize_mask)
-		goto out;
-
-	if (iov_iter_alignment(iter) & blocksize_mask)
-		goto out;
-
-	/* If this is a write we don't need to check anymore */
-	if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
-		return 0;
-	/*
-	 * Check to make sure we don't have duplicate iov_base's in this
-	 * iovec, if so return EINVAL, otherwise we'll get csum errors
-	 * when reading back.
-	 */
-	for (seg = 0; seg < iter->nr_segs; seg++) {
-		for (i = seg + 1; i < iter->nr_segs; i++) {
-			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
-				goto out;
-		}
-	}
-	retval = 0;
-out:
-	return retval;
-}
-
 static const struct iomap_ops btrfs_dio_iomap_ops = {
 	.iomap_begin = btrfs_dio_iomap_begin,
 	.iomap_end = btrfs_dio_iomap_end,
@@ -8635,7 +8602,6 @@ ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
-	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_changeset *data_reserved = NULL;
 	loff_t offset = iocb->ki_pos;
 	size_t count = 0;
@@ -8644,9 +8610,6 @@ ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 
 	lockdep_assert_held(&inode->i_rwsem);
 
-	if (check_direct_IO(fs_info, iter, offset))
-		return 0;
-
 	count = iov_iter_count(iter);
 	if (iov_iter_rw(iter) == WRITE) {
 		/*
On 6:42 21/12, Christoph Hellwig wrote: > So Ilooked into the "unlocked" direct I/O case, and I think the current > code using dio_sem is really sketchy. What btrfs really needs to do is > take i_rwsem shared by default for direct writes, and only upgrade to > the exclusive lock when needed, similar to xfs and the WIP ext4 code. Sketchy in what sense? I am not trying to second-guess, but I want to know where it could fail. I would want it to be simpler as well, but if we can perform direct writes without locking, why should we introduce locks. > > While looking for that I also noticed two other things: > > - check_direct_IO looks pretty bogus > - btrfs_direct_IO really should be split and folded into the two > callers Thanks for the cleanups. I will incorporate these. > > Untested patches attached. The first should probably go into a prep > patch, and the second could be folded into this one. > From bc285e440a50140beb456f11e545a049bdf51ec1 Mon Sep 17 00:00:00 2001 > From: Christoph Hellwig <hch@lst.de> > Date: Sat, 21 Dec 2019 15:17:26 +0100 > Subject: btrfs: remove direct I/O aligment checks > > The direct I/O code itself already checks for the proper sector > size alignment, so remove the duplicate checks. The remainder of > check_direct_IO is not ony needed for reads and can be moved to > file.c and outside of i_rwsem. > > Signed-off-by: Christoph Hellwig <hch@lst.de> > --- > fs/btrfs/file.c | 34 +++++++++++++++++++++++++++------- > fs/btrfs/inode.c | 37 ------------------------------------- > 2 files changed, 27 insertions(+), 44 deletions(-) > > diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c > index a6d41d7bf362..0522f6d45a98 100644 > --- a/fs/btrfs/file.c > +++ b/fs/btrfs/file.c > @@ -3444,21 +3444,41 @@ static int btrfs_file_open(struct inode *inode, struct file *filp) > return generic_file_open(inode, filp); > } > > -static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) > +/* > + * If there are duplicate iov_base's in this iovec, fall back to buffered I/O > + * to avoid checksum errors. 
> + */ > +static bool btrfs_direct_read_ok(struct kiocb *iocb, struct iov_iter *iter) > { > - ssize_t ret = 0; > + int seg, i; > > - if (iocb->ki_flags & IOCB_DIRECT) { > + if (!iter_is_iovec(iter)) > + return true; > + > + for (seg = 0; seg < iter->nr_segs; seg++) { > + for (i = seg + 1; i < iter->nr_segs; i++) { > + if (iter->iov[seg].iov_base == iter->iov[i].iov_base) > + return false; > + } > + } > + > + return true; > +} > + > + > +static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) > +{ > + if ((iocb->ki_flags & IOCB_DIRECT) && btrfs_direct_read_ok(iocb, to)) { > struct inode *inode = file_inode(iocb->ki_filp); > + ssize_t ret; > > inode_lock_shared(inode); > ret = btrfs_direct_IO(iocb, to); > inode_unlock_shared(inode); > - if (ret < 0) > - return ret; > - } > > - return generic_file_buffered_read(iocb, to, ret); > + return ret; > + } > + return generic_file_buffered_read(iocb, to, 0); > } > > const struct file_operations btrfs_file_operations = { > diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c > index 824f318cee5e..18d153a62655 100644 > --- a/fs/btrfs/inode.c > +++ b/fs/btrfs/inode.c > @@ -8581,39 +8581,6 @@ static blk_qc_t btrfs_submit_direct(struct bio *dio_bio, struct file *file, > return BLK_QC_T_NONE; > } > > -static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, > - const struct iov_iter *iter, loff_t offset) > -{ > - int seg; > - int i; > - unsigned int blocksize_mask = fs_info->sectorsize - 1; > - ssize_t retval = -EINVAL; > - > - if (offset & blocksize_mask) > - goto out; > - > - if (iov_iter_alignment(iter) & blocksize_mask) > - goto out; > - > - /* If this is a write we don't need to check anymore */ > - if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter)) > - return 0; > - /* > - * Check to make sure we don't have duplicate iov_base's in this > - * iovec, if so return EINVAL, otherwise we'll get csum errors > - * when reading back. > - */ > - for (seg = 0; seg < iter->nr_segs; seg++) { > - for (i = seg + 1; i < iter->nr_segs; i++) { > - if (iter->iov[seg].iov_base == iter->iov[i].iov_base) > - goto out; > - } > - } > - retval = 0; > -out: > - return retval; > -} > - > static const struct iomap_ops btrfs_dio_iomap_ops = { > .iomap_begin = btrfs_dio_iomap_begin, > .iomap_end = btrfs_dio_iomap_end, > @@ -8635,7 +8602,6 @@ ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) > { > struct file *file = iocb->ki_filp; > struct inode *inode = file->f_mapping->host; > - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); > struct extent_changeset *data_reserved = NULL; > loff_t offset = iocb->ki_pos; > size_t count = 0; > @@ -8644,9 +8610,6 @@ ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) > > lockdep_assert_held(&inode->i_rwsem); > > - if (check_direct_IO(fs_info, iter, offset)) > - return 0; > - > count = iov_iter_count(iter); > if (iov_iter_rw(iter) == WRITE) { > /* > -- > 2.24.0 > > From 7194fa1986a48af46d2b01457865066cdbd14e35 Mon Sep 17 00:00:00 2001 > From: Christoph Hellwig <hch@lst.de> > Date: Sat, 21 Dec 2019 15:23:41 +0100 > Subject: btrfs: split btrfs_direct_IO > > The read and write versions don't have anything in common except > for the call to iomap_dio_rw. So split this function, and merge > each half into its only caller. 
> > Signed-off-by: Christoph Hellwig <hch@lst.de> > --- > fs/btrfs/ctree.h | 4 ++- > fs/btrfs/file.c | 44 +++++++++++++++++++++++++---- > fs/btrfs/inode.c | 72 ++++-------------------------------------------- > 3 files changed, 48 insertions(+), 72 deletions(-) > > diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h > index 8faa069b0a73..fccbbfebdf88 100644 > --- a/fs/btrfs/ctree.h > +++ b/fs/btrfs/ctree.h > @@ -28,6 +28,7 @@ > #include <linux/dynamic_debug.h> > #include <linux/refcount.h> > #include <linux/crc32c.h> > +#include <linux/iomap.h> > #include "extent-io-tree.h" > #include "extent_io.h" > #include "extent_map.h" > @@ -2904,7 +2905,8 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end); > void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, > u64 end, int uptodate); > extern const struct dentry_operations btrfs_dentry_operations; > -ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter); > +const struct iomap_ops btrfs_dio_iomap_ops; > +const struct iomap_dio_ops btrfs_dio_ops; > > /* ioctl.c */ > long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); > diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c > index 0522f6d45a98..ed0b2e015d8d 100644 > --- a/fs/btrfs/file.c > +++ b/fs/btrfs/file.c > @@ -1822,17 +1822,50 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, > return num_written ? num_written : ret; > } > > -static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) > +static ssize_t btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) > { > struct file *file = iocb->ki_filp; > struct inode *inode = file_inode(file); > - loff_t pos; > + size_t count = iov_iter_count(from); > + struct extent_changeset *data_reserved = NULL; > + loff_t pos = iocb->ki_pos; > ssize_t written; > ssize_t written_buffered; > loff_t endbyte; > + bool relock = false; > int err; > > - written = btrfs_direct_IO(iocb, from); > + /* > + * If the write DIO is beyond the EOF, we need update the isize, but > + * it is protected by i_mutex. So we can not unlock the i_mutex in > + * this case. 
> + */ > + if (pos + count <= inode->i_size) { > + inode_unlock(inode); > + relock = true; > + } else { > + if (iocb->ki_flags & IOCB_NOWAIT) > + return -EAGAIN; > + } > + > + err = btrfs_delalloc_reserve_space(inode, &data_reserved, pos, count); > + if (err) { > + if (relock) > + inode_lock(inode); > + return err; > + } > + > + down_read(&BTRFS_I(inode)->dio_sem); > + written = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops, > + is_sync_kiocb(iocb)); > + up_read(&BTRFS_I(inode)->dio_sem); > + if (written >= 0 && (size_t)written < count) > + btrfs_delalloc_release_space(inode, data_reserved, > + pos, count - (size_t)written, true); > + btrfs_delalloc_release_extents(BTRFS_I(inode), count); > + if (relock) > + inode_lock(inode); > + extent_changeset_free(data_reserved); > > if (written < 0 || !iov_iter_count(from)) > return written; > @@ -1975,7 +2008,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, > atomic_inc(&BTRFS_I(inode)->sync_writers); > > if (iocb->ki_flags & IOCB_DIRECT) { > - num_written = __btrfs_direct_write(iocb, from); > + num_written = btrfs_direct_write(iocb, from); > } else { > num_written = btrfs_buffered_write(iocb, from); > if (num_written > 0) > @@ -3473,7 +3506,8 @@ static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) > ssize_t ret; > > inode_lock_shared(inode); > - ret = btrfs_direct_IO(iocb, to); > + ret = iomap_dio_rw(iocb, to, &btrfs_dio_iomap_ops, > + &btrfs_dio_ops, is_sync_kiocb(iocb)); > inode_unlock_shared(inode); > > return ret; > diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c > index 18d153a62655..7b747270ec40 100644 > --- a/fs/btrfs/inode.c > +++ b/fs/btrfs/inode.c > @@ -29,7 +29,6 @@ > #include <linux/iversion.h> > #include <linux/swap.h> > #include <linux/sched/mm.h> > -#include <linux/iomap.h> > #include <asm/unaligned.h> > #include "misc.h" > #include "ctree.h" > @@ -7856,6 +7855,11 @@ static int btrfs_dio_iomap_end(struct inode *inode, loff_t pos, loff_t length, > return 0; > } > > +const struct iomap_ops btrfs_dio_iomap_ops = { > + .iomap_begin = btrfs_dio_iomap_begin, > + .iomap_end = btrfs_dio_iomap_end, > +}; > + > static inline blk_status_t submit_dio_repair_bio(struct inode *inode, > struct bio *bio, > int mirror_num) > @@ -8581,74 +8585,10 @@ static blk_qc_t btrfs_submit_direct(struct bio *dio_bio, struct file *file, > return BLK_QC_T_NONE; > } > > -static const struct iomap_ops btrfs_dio_iomap_ops = { > - .iomap_begin = btrfs_dio_iomap_begin, > - .iomap_end = btrfs_dio_iomap_end, > -}; > - > -static const struct iomap_dio_ops btrfs_dops = { > +const struct iomap_dio_ops btrfs_dio_ops = { > .submit_io = btrfs_submit_direct, > }; > > - > -/* > - * btrfs_direct_IO - perform direct I/O > - * inode->i_rwsem must be locked before calling this function, shared or exclusive. > - * @iocb - kernel iocb > - * @iter - iter to/from data is copied > - */ > - > -ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) > -{ > - struct file *file = iocb->ki_filp; > - struct inode *inode = file->f_mapping->host; > - struct extent_changeset *data_reserved = NULL; > - loff_t offset = iocb->ki_pos; > - size_t count = 0; > - bool relock = false; > - ssize_t ret; > - > - lockdep_assert_held(&inode->i_rwsem); > - > - count = iov_iter_count(iter); > - if (iov_iter_rw(iter) == WRITE) { > - /* > - * If the write DIO is beyond the EOF, we need update > - * the isize, but it is protected by i_mutex. So we can > - * not unlock the i_mutex at this case. 
> - */ > - if (offset + count <= inode->i_size) { > - inode_unlock(inode); > - relock = true; > - } else if (iocb->ki_flags & IOCB_NOWAIT) { > - ret = -EAGAIN; > - goto out; > - } > - ret = btrfs_delalloc_reserve_space(inode, &data_reserved, > - offset, count); > - if (ret) > - goto out; > - > - down_read(&BTRFS_I(inode)->dio_sem); > - } > - > - ret = iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dops, > - is_sync_kiocb(iocb)); > - > - if (iov_iter_rw(iter) == WRITE) { > - up_read(&BTRFS_I(inode)->dio_sem); > - if (ret >= 0 && (size_t)ret < count) > - btrfs_delalloc_release_space(inode, data_reserved, > - offset, count - (size_t)ret, true); > - btrfs_delalloc_release_extents(BTRFS_I(inode), count); > - } > -out: > - if (relock) > - inode_lock(inode); > - extent_changeset_free(data_reserved); > - return ret; > -} > - > #define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC) > > static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, > -- > 2.24.0 >
On 6:42 21/12, Christoph Hellwig wrote:
> So I looked into the "unlocked" direct I/O case, and I think the current
> code using dio_sem is really sketchy. What btrfs really needs to do is
> take i_rwsem shared by default for direct writes, and only upgrade to
> the exclusive lock when needed, similar to xfs and the WIP ext4 code.
>
> While looking for that I also noticed two other things:
>
>  - check_direct_IO looks pretty bogus
>  - btrfs_direct_IO really should be split and folded into the two
>    callers
>
> Untested patches attached. The first should probably go into a prep
> patch, and the second could be folded into this one.

Testing revealed that removing check_direct_IO will not work. We try to
reserve space as a whole for the entire direct write, and these checks
safeguard against requests that are not aligned to fs_info->sectorsize.

I liked the patch to split and fold the direct_IO code. However, merging
it into this one would make the changes harder to follow, since the code
is moving to a different file rather than being changed in place. It
would serve better as a separate cleanup patch.
On Tue, Jan 07, 2020 at 05:59:09AM -0600, Goldwyn Rodrigues wrote:
> Testing revealed that removing check_direct_IO will not work. We try to
> reserve space as a whole for the entire direct write, and these checks
> safeguard against requests that are not aligned to fs_info->sectorsize.

Ok. The fact that a request which isn't aligned to the sector size falls
back to buffered I/O instead of failing the I/O is still bogus, though.
Btrfs should align with all other file systems there.

> I liked the patch to split and fold the direct_IO code. However, merging
> it into this one would make the changes harder to follow, since the code
> is moving to a different file rather than being changed in place. It
> would serve better as a separate cleanup patch.

Sure, this can be added on top.
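[Editor's note] To make the alternative behaviour concrete, here is a minimal sketch, not from any posted patch, of failing a misaligned request with -EINVAL instead of silently falling back to buffered I/O. It reuses the fs_info->sectorsize mask and the iov_iter_alignment() test that check_direct_IO already performs; the helper name is made up for illustration:

/*
 * Hypothetical sketch: reject misaligned direct I/O outright instead of
 * quietly falling back to buffered I/O.
 */
static int btrfs_dio_check_alignment(struct btrfs_fs_info *fs_info,
				     struct iov_iter *iter, loff_t offset)
{
	unsigned int blocksize_mask = fs_info->sectorsize - 1;

	/* Both the file offset and the memory segments must be sector aligned. */
	if ((offset & blocksize_mask) ||
	    (iov_iter_alignment(iter) & blocksize_mask))
		return -EINVAL;	/* fail the I/O rather than fall back */

	return 0;
}

This roughly mirrors how the iomap direct I/O path itself reports unaligned requests, which is the behaviour other file systems expose.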
On Thu, Jan 02, 2020 at 12:01:27PM -0600, Goldwyn Rodrigues wrote:
> On 6:42 21/12, Christoph Hellwig wrote:
> > So I looked into the "unlocked" direct I/O case, and I think the current
> > code using dio_sem is really sketchy. What btrfs really needs to do is
> > take i_rwsem shared by default for direct writes, and only upgrade to
> > the exclusive lock when needed, similar to xfs and the WIP ext4 code.
>
> Sketchy in what sense? I am not trying to second-guess, but I want to
> know where it could fail. I would want it to be simpler as well, but if
> we can perform direct writes without locking, why should we introduce
> locks.

Sketchy in that it needs yet another lock which doesn't really provide
exclusion guarantees on its own. In many ways this lock plus the historic
i_mutex were abused to provide the shared/exclusive lock that now exists
natively with i_rwsem.
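[Editor's note] For reference, a rough sketch of the i_rwsem pattern being described here, assuming the btrfs_dio_iomap_ops/btrfs_dio_ops tables from the patches above. The function name and the simple size-extension test are illustrative assumptions, not code from this series:

/*
 * Illustrative sketch only: take i_rwsem shared for direct writes by
 * default and use the exclusive lock when the write may extend i_size,
 * instead of relying on dio_sem for exclusion.
 */
static ssize_t btrfs_dio_write_sketch(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	bool exclusive = iocb->ki_pos + iov_iter_count(from) > i_size_read(inode);
	ssize_t ret;

	if (exclusive)
		inode_lock(inode);
	else
		inode_lock_shared(inode);

	ret = iomap_dio_rw(iocb, from, &btrfs_dio_iomap_ops, &btrfs_dio_ops,
			   is_sync_kiocb(iocb));

	if (exclusive)
		inode_unlock(inode);
	else
		inode_unlock_shared(inode);

	return ret;
}

A real implementation would also have to recheck the extension test after taking the shared lock and retry with the exclusive lock if i_size moved, and would still need the delalloc reservation and IOCB_NOWAIT handling that the direct write path in this series covers; the point of the sketch is only that the shared/exclusive choice can live entirely in i_rwsem.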
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b2e8fd8a8e59..113dcd1a11cd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2904,6 +2904,7 @@ int btrfs_writepage_cow_fixup(struct page *page, u64 start, u64 end); void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start, u64 end, int uptodate); extern const struct dentry_operations btrfs_dentry_operations; +ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter); /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0cb43b682789..7010dd7beccc 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1832,7 +1832,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) loff_t endbyte; int err; - written = generic_file_direct_write(iocb, from); + written = btrfs_direct_IO(iocb, from); if (written < 0 || !iov_iter_count(from)) return written; @@ -3444,9 +3444,26 @@ static int btrfs_file_open(struct inode *inode, struct file *filp) return generic_file_open(inode, filp); } +static ssize_t btrfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + ssize_t ret = 0; + + if (iocb->ki_flags & IOCB_DIRECT) { + struct inode *inode = file_inode(iocb->ki_filp); + + inode_lock_shared(inode); + ret = btrfs_direct_IO(iocb, to); + inode_unlock_shared(inode); + if (ret < 0) + return ret; + } + + return generic_file_buffered_read(iocb, to, ret); +} + const struct file_operations btrfs_file_operations = { .llseek = btrfs_file_llseek, - .read_iter = generic_file_read_iter, + .read_iter = btrfs_file_read_iter, .splice_read = generic_file_splice_read, .write_iter = btrfs_file_write_iter, .mmap = btrfs_file_mmap, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 56032c518b26..ff5ee99086f0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -29,6 +29,7 @@ #include <linux/iversion.h> #include <linux/swap.h> #include <linux/sched/mm.h> +#include <linux/iomap.h> #include <asm/unaligned.h> #include "misc.h" #include "ctree.h" @@ -7510,7 +7511,7 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, } static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, - struct extent_state **cached_state, int writing) + struct extent_state **cached_state, bool writing) { struct btrfs_ordered_extent *ordered; int ret = 0; @@ -7648,30 +7649,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, } -static int btrfs_get_blocks_direct_read(struct extent_map *em, - struct buffer_head *bh_result, - struct inode *inode, - u64 start, u64 len) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - - if (em->block_start == EXTENT_MAP_HOLE || - test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) - return -ENOENT; - - len = min(len, em->len - (start - em->start)); - - bh_result->b_blocknr = (em->block_start + (start - em->start)) >> - inode->i_blkbits; - bh_result->b_size = len; - bh_result->b_bdev = fs_info->fs_devices->latest_bdev; - set_buffer_mapped(bh_result); - - return 0; -} - static int btrfs_get_blocks_direct_write(struct extent_map **map, - struct buffer_head *bh_result, struct inode *inode, struct btrfs_dio_data *dio_data, u64 start, u64 len) @@ -7733,7 +7711,6 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, } /* this will cow the extent */ - len = bh_result->b_size; free_extent_map(em); *map = em = btrfs_new_extent_direct(inode, start, len); if (IS_ERR(em)) { @@ -7744,15 +7721,6 @@ static int 
btrfs_get_blocks_direct_write(struct extent_map **map, len = min(len, em->len - (start - em->start)); skip_cow: - bh_result->b_blocknr = (em->block_start + (start - em->start)) >> - inode->i_blkbits; - bh_result->b_size = len; - bh_result->b_bdev = fs_info->fs_devices->latest_bdev; - set_buffer_mapped(bh_result); - - if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) - set_buffer_new(bh_result); - /* * Need to update the i_size under the extent lock so buffered * readers will get the updated i_size when we unlock. @@ -7768,24 +7736,37 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map, return ret; } -static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, - struct buffer_head *bh_result, int create) +static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, + loff_t length, unsigned flags, struct iomap *iomap, + struct iomap *srcmap) { struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct extent_map *em; struct extent_state *cached_state = NULL; struct btrfs_dio_data *dio_data = NULL; - u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; - u64 len = bh_result->b_size; + bool write = !!(flags & IOMAP_WRITE); int ret = 0; + u64 len = length; + bool unlock_extents = false; - if (!create) + if (!write) len = min_t(u64, len, fs_info->sectorsize); lockstart = start; lockend = start + len - 1; + /* + * The generic stuff only does filemap_write_and_wait_range, which + * isn't enough if we've written compressed pages to this area, so + * we need to flush the dirty pages again to make absolutely sure + * that any outstanding dirty pages are on disk. + */ + if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, + &BTRFS_I(inode)->runtime_flags)) + ret = filemap_fdatawrite_range(inode->i_mapping, start, + start + length - 1); + if (current->journal_info) { /* * Need to pull our outstanding extents and set journal_info to NULL so @@ -7801,7 +7782,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * this range and we need to fallback to buffered. */ if (lock_extent_direct(inode, lockstart, lockend, &cached_state, - create)) { + write)) { ret = -ENOTBLK; goto err; } @@ -7833,35 +7814,52 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, goto unlock_err; } - if (create) { - ret = btrfs_get_blocks_direct_write(&em, bh_result, inode, + len = min(len, em->len - (start - em->start)); + if (write) { + ret = btrfs_get_blocks_direct_write(&em, inode, dio_data, start, len); if (ret < 0) goto unlock_err; - - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, - lockend, &cached_state); + unlock_extents = true; + /* Recalc len in case the new em is smaller than requested */ + len = min(len, em->len - (start - em->start)); + } else if (em->block_start == EXTENT_MAP_HOLE || + test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { + /* Unlock in case of direct reading from a hole */ + unlock_extents = true; } else { - ret = btrfs_get_blocks_direct_read(em, bh_result, inode, - start, len); - /* Can be negative only if we read from a hole */ - if (ret < 0) { - ret = 0; - free_extent_map(em); - goto unlock_err; - } /* * We need to unlock only the end area that we aren't using. * The rest is going to be unlocked by the endio routine. 
*/ - lockstart = start + bh_result->b_size; - if (lockstart < lockend) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - lockstart, lockend, &cached_state); - } else { - free_extent_state(cached_state); - } + lockstart = start + len; + if (lockstart < lockend) + unlock_extents = true; + } + + if (unlock_extents) + unlock_extent_cached(&BTRFS_I(inode)->io_tree, + lockstart, lockend, &cached_state); + else + free_extent_state(cached_state); + + /* + * Translate extent map information to iomap + * We trim the extents (and move the addr) even though + * iomap code does that, since we have locked only the parts + * we are performing I/O in. + */ + if ((em->block_start == EXTENT_MAP_HOLE) || + (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && !write)) { + iomap->addr = IOMAP_NULL_ADDR; + iomap->type = IOMAP_HOLE; + } else { + iomap->addr = em->block_start + (start - em->start); + iomap->type = IOMAP_MAPPED; } + iomap->offset = start; + iomap->bdev = fs_info->fs_devices->latest_bdev; + iomap->length = len; free_extent_map(em); @@ -8229,10 +8227,9 @@ static void btrfs_endio_direct_read(struct bio *bio) kfree(dip); - dio_bio->bi_status = err; - dio_end_io(dio_bio); btrfs_io_bio_free_csum(io_bio); - bio_put(bio); + dio_bio->bi_status = err; + bio_endio(dio_bio); } static void __endio_write_update_ordered(struct inode *inode, @@ -8289,8 +8286,7 @@ static void btrfs_endio_direct_write(struct bio *bio) kfree(dip); dio_bio->bi_status = bio->bi_status; - dio_end_io(dio_bio); - bio_put(bio); + bio_endio(dio_bio); } static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data, @@ -8522,9 +8518,10 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip) return 0; } -static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, +static blk_qc_t btrfs_submit_direct(struct bio *dio_bio, struct file *file, loff_t file_offset) { + struct inode *inode = file_inode(file); struct btrfs_dio_private *dip = NULL; struct bio *bio = NULL; struct btrfs_io_bio *io_bio; @@ -8575,7 +8572,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, ret = btrfs_submit_direct_hook(dip); if (!ret) - return; + return BLK_QC_T_NONE; btrfs_io_bio_free_csum(io_bio); @@ -8594,7 +8591,7 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, /* * The end io callbacks free our dip, do the final put on bio * and all the cleanup and final put for dio_bio (through - * dio_end_io()). + * end_io()). */ dip = NULL; bio = NULL; @@ -8609,15 +8606,12 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode, file_offset + dio_bio->bi_iter.bi_size - 1); dio_bio->bi_status = BLK_STS_IOERR; - /* - * Releases and cleans up our dio_bio, no need to bio_put() - * nor bio_endio()/bio_io_error() against dio_bio. - */ - dio_end_io(dio_bio); + bio_endio(dio_bio); } if (bio) bio_put(bio); kfree(dip); + return BLK_QC_T_NONE; } static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, @@ -8653,7 +8647,23 @@ static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info, return retval; } -static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) +static const struct iomap_ops btrfs_dio_iomap_ops = { + .iomap_begin = btrfs_dio_iomap_begin, +}; + +static const struct iomap_dio_ops btrfs_dops = { + .submit_io = btrfs_submit_direct, +}; + + +/* + * btrfs_direct_IO - perform direct I/O + * inode->i_rwsem must be locked before calling this function, shared or exclusive. 
+ * @iocb - kernel iocb + * @iter - iter to/from data is copied + */ + +ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; @@ -8662,28 +8672,15 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) struct extent_changeset *data_reserved = NULL; loff_t offset = iocb->ki_pos; size_t count = 0; - int flags = 0; - bool wakeup = true; bool relock = false; ssize_t ret; + lockdep_assert_held(&inode->i_rwsem); + if (check_direct_IO(fs_info, iter, offset)) return 0; - inode_dio_begin(inode); - - /* - * The generic stuff only does filemap_write_and_wait_range, which - * isn't enough if we've written compressed pages to this area, so - * we need to flush the dirty pages again to make absolutely sure - * that any outstanding dirty pages are on disk. - */ count = iov_iter_count(iter); - if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, - &BTRFS_I(inode)->runtime_flags)) - filemap_fdatawrite_range(inode->i_mapping, offset, - offset + count - 1); - if (iov_iter_rw(iter) == WRITE) { /* * If the write DIO is beyond the EOF, we need update @@ -8714,17 +8711,11 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) dio_data.unsubmitted_oe_range_end = (u64)offset; current->journal_info = &dio_data; down_read(&BTRFS_I(inode)->dio_sem); - } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK, - &BTRFS_I(inode)->runtime_flags)) { - inode_dio_end(inode); - flags = DIO_LOCKING | DIO_SKIP_HOLES; - wakeup = false; } - ret = __blockdev_direct_IO(iocb, inode, - fs_info->fs_devices->latest_bdev, - iter, btrfs_get_blocks_direct, NULL, - btrfs_submit_direct, flags); + ret = iomap_dio_rw(iocb, iter, &btrfs_dio_iomap_ops, &btrfs_dops, + is_sync_kiocb(iocb)); + if (iov_iter_rw(iter) == WRITE) { up_read(&BTRFS_I(inode)->dio_sem); current->journal_info = NULL; @@ -8751,11 +8742,8 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) btrfs_delalloc_release_extents(BTRFS_I(inode), count); } out: - if (wakeup) - inode_dio_end(inode); if (relock) inode_lock(inode); - extent_changeset_free(data_reserved); return ret; } @@ -11045,7 +11033,7 @@ static const struct address_space_operations btrfs_aops = { .writepage = btrfs_writepage, .writepages = btrfs_writepages, .readpages = btrfs_readpages, - .direct_IO = btrfs_direct_IO, + .direct_IO = noop_direct_IO, .invalidatepage = btrfs_invalidatepage, .releasepage = btrfs_releasepage, .set_page_dirty = btrfs_set_page_dirty,