Message ID | 20211101203929.954622-16-willy@infradead.org (mailing list archive)
---|---
State | New, archived
Series | iomap/xfs folio patches
Hi "Matthew, Thank you for the patch! Yet something to improve: [auto build test ERROR on hnaz-mm/master] [also build test ERROR on axboe-block/for-next linus/master next-20211101] [cannot apply to xfs-linux/for-next djwong-xfs/djwong-devel v5.15] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Matthew-Wilcox-Oracle/iomap-xfs-folio-patches/20211102-052926 base: https://github.com/hnaz/linux-mm master config: sparc64-randconfig-r035-20211101 (attached as .config) compiler: sparc64-linux-gcc (GCC) 11.2.0 reproduce (this is a W=1 build): wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # https://github.com/0day-ci/linux/commit/b3cbfa38e55d041252c57ee712d1bbb146a4aee8 git remote add linux-review https://github.com/0day-ci/linux git fetch --no-tags linux-review Matthew-Wilcox-Oracle/iomap-xfs-folio-patches/20211102-052926 git checkout b3cbfa38e55d041252c57ee712d1bbb146a4aee8 # save the attached .config to linux build tree COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.2.0 make.cross ARCH=sparc64 If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot <lkp@intel.com> All errors (new ones prefixed by >>): fs/iomap/buffered-io.c: In function '__iomap_write_end': >> fs/iomap/buffered-io.c:657:9: error: implicit declaration of function 'flush_dcache_folio'; did you mean 'flush_dcache_page'? [-Werror=implicit-function-declaration] 657 | flush_dcache_folio(folio); | ^~~~~~~~~~~~~~~~~~ | flush_dcache_page cc1: some warnings being treated as errors vim +657 fs/iomap/buffered-io.c 652 653 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len, 654 size_t copied, struct folio *folio) 655 { 656 struct iomap_page *iop = to_iomap_page(folio); > 657 flush_dcache_folio(folio); 658 659 /* 660 * The blocks that were entirely written will now be uptodate, so we 661 * don't have to worry about a readpage reading them and overwriting a 662 * partial write. However, if we've encountered a short write and only 663 * partially written into a block, it will not be marked uptodate, so a 664 * readpage might come in and destroy our partial write. 665 * 666 * Do the simplest thing and just treat any short write to a 667 * non-uptodate page as a zero-length write, and force the caller to 668 * redo the whole thing. 669 */ 670 if (unlikely(copied < len && !folio_test_uptodate(folio))) 671 return 0; 672 iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len); 673 filemap_dirty_folio(inode->i_mapping, folio); 674 return copied; 675 } 676 --- 0-DAY CI Kernel Test Service, Intel Corporation https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Looks good,
Reviewed-by: Christoph Hellwig <hch@lst.de>
On Mon, Nov 01, 2021 at 08:39:23PM +0000, Matthew Wilcox (Oracle) wrote:
> These functions still only work in PAGE_SIZE chunks, but there are
> fewer conversions from tail to head pages as a result of this patch.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  fs/iomap/buffered-io.c | 67 ++++++++++++++++++++++--------------------
>  1 file changed, 35 insertions(+), 32 deletions(-)
> [...]
> @@ -618,29 +618,30 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
> [...]
> +	page = folio_file_page(folio, pos >> PAGE_SHIFT);

Isn't this only needed in the BUFFER_HEAD case?

--D
On Tue, Nov 02, 2021 at 04:22:15PM -0700, Darrick J. Wong wrote:
> > +	page = folio_file_page(folio, pos >> PAGE_SHIFT);
>
> Isn't this only needed in the BUFFER_HEAD case?

Good catch.  Want me to fold this in?

+++ b/fs/iomap/buffered-io.c
@@ -608,7 +608,6 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct folio *folio;
-	struct page *page;
 	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
 	int status = 0;

@@ -632,12 +631,12 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 		goto out_no_page;
 	}

-	page = folio_file_page(folio, pos >> PAGE_SHIFT);
 	if (srcmap->type == IOMAP_INLINE)
 		status = iomap_write_begin_inline(iter, folio);
-	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
+	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
+		struct page *page = folio_file_page(folio, pos >> PAGE_SHIFT);
 		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
-	else
+	} else
 		status = __iomap_write_begin(iter, pos, len, folio);

 	if (unlikely(status))
On Wed, Nov 03, 2021 at 03:15:13AM +0000, Matthew Wilcox wrote:
> On Tue, Nov 02, 2021 at 04:22:15PM -0700, Darrick J. Wong wrote:
> > > +	page = folio_file_page(folio, pos >> PAGE_SHIFT);
> >
> > Isn't this only needed in the BUFFER_HEAD case?
>
> Good catch.  Want me to fold this in?
>
> @@ -632,12 +631,12 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
>  		goto out_no_page;
>  	}
>
> -	page = folio_file_page(folio, pos >> PAGE_SHIFT);
>  	if (srcmap->type == IOMAP_INLINE)
>  		status = iomap_write_begin_inline(iter, folio);
> -	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
> +	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
> +		struct page *page = folio_file_page(folio, pos >> PAGE_SHIFT);
>  		status = __block_write_begin_int(page, pos, len, NULL, srcmap);

On second thoughts, this is silly.  __block_write_begin_int() doesn't
want the precise page (because it constructs buffer_heads and attaches
them to the passed-in page).  I should just pass &folio->page here.
And __block_write_begin_int() should be converted to take a folio at
some point.
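Reading the two follow-ups together, the dispatch in iomap_write_begin() would end up roughly as sketched below. This is an illustration of the thread's conclusion, not the committed code, and it assumes the &folio->page form from the last message:

```c
        if (srcmap->type == IOMAP_INLINE)
                status = iomap_write_begin_inline(iter, folio);
        else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
                /*
                 * __block_write_begin_int() attaches buffer_heads to the
                 * page it is handed, so the head page suffices; no
                 * folio_file_page() lookup is needed.
                 */
                status = __block_write_begin_int(&folio->page, pos, len,
                                NULL, srcmap);
        else
                status = __iomap_write_begin(iter, pos, len, folio);
```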
```diff
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index b55d947867b1..6df8fdbb1951 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -539,9 +539,8 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
 }
 
 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
-		unsigned len, struct page *page)
+		size_t len, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	struct iomap_page *iop = iomap_page_create(iter->inode, folio);
 	loff_t block_size = i_blocksize(iter->inode);
@@ -583,9 +582,8 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 }
 
 static int iomap_write_begin_inline(const struct iomap_iter *iter,
-		struct page *page)
+		struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	int ret;
 
 	/* needs more work for the tailpacking case; disable for now */
@@ -598,11 +596,13 @@ static int iomap_write_begin_inline(const struct iomap_iter *iter,
 }
 
 static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
-		unsigned len, struct page **pagep)
+		size_t len, struct folio **foliop)
 {
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
+	struct folio *folio;
 	struct page *page;
+	unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
 	int status = 0;
 
 	BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -618,29 +618,30 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 		return status;
 	}
 
-	page = grab_cache_page_write_begin(iter->inode->i_mapping,
-			pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
-	if (!page) {
+	folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+			fgp, mapping_gfp_mask(iter->inode->i_mapping));
+	if (!folio) {
 		status = -ENOMEM;
 		goto out_no_page;
 	}
 
+	page = folio_file_page(folio, pos >> PAGE_SHIFT);
 	if (srcmap->type == IOMAP_INLINE)
-		status = iomap_write_begin_inline(iter, page);
+		status = iomap_write_begin_inline(iter, folio);
 	else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
 		status = __block_write_begin_int(page, pos, len, NULL, srcmap);
 	else
-		status = __iomap_write_begin(iter, pos, len, page);
+		status = __iomap_write_begin(iter, pos, len, folio);
 
 	if (unlikely(status))
 		goto out_unlock;
 
-	*pagep = page;
+	*foliop = folio;
 	return 0;
 
 out_unlock:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	iomap_write_failed(iter->inode, pos, len);
 
 out_no_page:
@@ -650,11 +651,10 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 }
 
 static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
-		size_t copied, struct page *page)
+		size_t copied, struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
 	struct iomap_page *iop = to_iomap_page(folio);
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 
 	/*
 	 * The blocks that were entirely written will now be uptodate, so we
@@ -667,10 +667,10 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
 	 * non-uptodate page as a zero-length write, and force the caller to
 	 * redo the whole thing.
 	 */
-	if (unlikely(copied < len && !PageUptodate(page)))
+	if (unlikely(copied < len && !folio_test_uptodate(folio)))
 		return 0;
 	iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
-	__set_page_dirty_nobuffers(page);
+	filemap_dirty_folio(inode->i_mapping, folio);
 	return copied;
 }
 
@@ -694,8 +694,9 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
 
 /* Returns the number of bytes copied.  May be 0.  Cannot be an errno. */
 static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
-		size_t copied, struct page *page)
+		size_t copied, struct folio *folio)
 {
+	struct page *page = folio_file_page(folio, pos >> PAGE_SHIFT);
 	const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
 	const struct iomap *srcmap = iomap_iter_srcmap(iter);
 	loff_t old_size = iter->inode->i_size;
@@ -707,7 +708,7 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
 				copied, page, NULL);
 	} else {
-		ret = __iomap_write_end(iter->inode, pos, len, copied, page);
+		ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
 	}
 
 	/*
@@ -719,13 +720,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
 		i_size_write(iter->inode, pos + ret);
 		iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
 	}
-	unlock_page(page);
+	folio_unlock(folio);
 
 	if (old_size < pos)
 		pagecache_isize_extended(iter->inode, old_size, pos);
 	if (page_ops && page_ops->page_done)
 		page_ops->page_done(iter->inode, pos, ret, page);
-	put_page(page);
+	folio_put(folio);
 
 	if (ret < len)
 		iomap_write_failed(iter->inode, pos, len);
@@ -740,6 +741,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 	long status = 0;
 
 	do {
+		struct folio *folio;
 		struct page *page;
 		unsigned long offset;	/* Offset into pagecache page */
 		unsigned long bytes;	/* Bytes to write to page */
@@ -763,16 +765,17 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
 			break;
 		}
 
-		status = iomap_write_begin(iter, pos, bytes, &page);
+		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (unlikely(status))
 			break;
 
+		page = folio_file_page(folio, pos >> PAGE_SHIFT);
 		if (mapping_writably_mapped(iter->inode->i_mapping))
 			flush_dcache_page(page);
 
 		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 
-		status = iomap_write_end(iter, pos, bytes, copied, page);
+		status = iomap_write_end(iter, pos, bytes, copied, folio);
 
 		if (unlikely(copied != status))
 			iov_iter_revert(i, copied - status);
@@ -838,13 +841,13 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	do {
 		unsigned long offset = offset_in_page(pos);
 		unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
-		struct page *page;
+		struct folio *folio;
 
-		status = iomap_write_begin(iter, pos, bytes, &page);
+		status = iomap_write_begin(iter, pos, bytes, &folio);
 		if (unlikely(status))
 			return status;
 
-		status = iomap_write_end(iter, pos, bytes, bytes, page);
+		status = iomap_write_end(iter, pos, bytes, bytes, folio);
 		if (WARN_ON_ONCE(status == 0))
 			return -EIO;
 
@@ -880,19 +883,19 @@ EXPORT_SYMBOL_GPL(iomap_file_unshare);
 
 static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
 {
-	struct page *page;
+	struct folio *folio;
 	int status;
 	unsigned offset = offset_in_page(pos);
 	unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
 
-	status = iomap_write_begin(iter, pos, bytes, &page);
+	status = iomap_write_begin(iter, pos, bytes, &folio);
 	if (status)
 		return status;
 
-	zero_user(page, offset, bytes);
-	mark_page_accessed(page);
+	zero_user(folio_file_page(folio, pos >> PAGE_SHIFT), offset, bytes);
+	folio_mark_accessed(folio);
 
-	return iomap_write_end(iter, pos, bytes, bytes, page);
+	return iomap_write_end(iter, pos, bytes, bytes, folio);
 }
 
 static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
```
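One recurring idiom above deserves a note: folio_file_page(folio, pos >> PAGE_SHIFT) picks the subpage of a possibly multi-page folio that backs a given file position. The sketch below shows what such a lookup computes, assuming the power-of-two folio sizes the page cache uses; it is illustrative only, not the kernel's exact definition, and the helper name is hypothetical:

```c
/* Simplified sketch of the subpage lookup, for illustration. */
static inline struct page *folio_file_page_sketch(struct folio *folio,
		pgoff_t index)
{
        /* Mask the file index down to an offset inside this folio. */
        return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
```

For an order-0 folio this reduces to the folio's sole page, &folio->page.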
These functions still only work in PAGE_SIZE chunks, but there are fewer
conversions from tail to head pages as a result of this patch.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 fs/iomap/buffered-io.c | 67 ++++++++++++++++++++++--------------------
 1 file changed, 35 insertions(+), 32 deletions(-)
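The "fewer conversions from tail to head pages" can be seen directly in the signature changes above: helpers that previously accepted a struct page had to re-derive the folio on every call. A condensed before/after sketch, with function bodies elided:

```c
/* Before: every call paid a tail-to-head conversion up front. */
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		unsigned len, struct page *page)
{
        struct folio *folio = page_folio(page);	/* conversion on each call */
        /* ... */
}

/* After: the folio from __filemap_get_folio() is passed straight down,
 * and a struct page is computed only where one is actually required. */
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
		size_t len, struct folio *folio)
{
        /* ... */
}
```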