Message ID | d06abc56a48e3ac7d8c0619fee57506f36fcca5b.1685962158.git.ritesh.list@gmail.com (mailing list archive) |
---|---|
State | Under Review |
Headers | show |
Series | iomap: Add support for per-block dirty state to improve write performance | expand |
On Mon, Jun 05, 2023 at 04:25:01PM +0530, Ritesh Harjani (IBM) wrote: > This patch renames the iomap_page_create/release() functions to > iomap_iop_alloc/free() calls. Later patches adds more functions for > handling iop structure with iomap_iop_** naming conventions. > Hence iomap_iop_alloc/free() makes more sense to be consistent with all > APIs. > > Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> > --- > fs/iomap/buffered-io.c | 21 +++++++++++---------- > 1 file changed, 11 insertions(+), 10 deletions(-) > > diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c > index 063133ec77f4..4567bdd4fff9 100644 > --- a/fs/iomap/buffered-io.c > +++ b/fs/iomap/buffered-io.c > @@ -43,8 +43,8 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio) > > static struct bio_set iomap_ioend_bioset; > > -static struct iomap_page * > -iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) > +static struct iomap_page *iomap_iop_alloc(struct inode *inode, Personally I preferred iop_alloc, but as I wasn't around to make to that point during the v6 review I'll let this slide. iomap_iop_* it is. (I invoke maintainer privilege and will rename the structure to iomap_folio and iop->iof since the objects no longer track /only/ a single page state.) Reviewed-by: Darrick J. 
Wong <djwong@kernel.org> --D > + struct folio *folio, unsigned int flags) > { > struct iomap_page *iop = to_iomap_page(folio); > unsigned int nr_blocks = i_blocks_per_folio(inode, folio); > @@ -69,7 +69,7 @@ iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) > return iop; > } > > -static void iomap_page_release(struct folio *folio) > +static void iomap_iop_free(struct folio *folio) > { > struct iomap_page *iop = folio_detach_private(folio); > struct inode *inode = folio->mapping->host; > @@ -231,7 +231,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter, > if (WARN_ON_ONCE(size > iomap->length)) > return -EIO; > if (offset > 0) > - iop = iomap_page_create(iter->inode, folio, iter->flags); > + iop = iomap_iop_alloc(iter->inode, folio, iter->flags); > else > iop = to_iomap_page(folio); > > @@ -269,7 +269,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, > return iomap_read_inline_data(iter, folio); > > /* zero post-eof blocks as the page may be mapped */ > - iop = iomap_page_create(iter->inode, folio, iter->flags); > + iop = iomap_iop_alloc(iter->inode, folio, iter->flags); > iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); > if (plen == 0) > goto done; > @@ -490,7 +490,7 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) > */ > if (folio_test_dirty(folio) || folio_test_writeback(folio)) > return false; > - iomap_page_release(folio); > + iomap_iop_free(folio); > return true; > } > EXPORT_SYMBOL_GPL(iomap_release_folio); > @@ -507,12 +507,12 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) > if (offset == 0 && len == folio_size(folio)) { > WARN_ON_ONCE(folio_test_writeback(folio)); > folio_cancel_dirty(folio); > - iomap_page_release(folio); > + iomap_iop_free(folio); > } else if (folio_test_large(folio)) { > /* Must release the iop so the page can be split */ > WARN_ON_ONCE(!folio_test_uptodate(folio) && > folio_test_dirty(folio)); > - 
iomap_page_release(folio); > + iomap_iop_free(folio); > } > } > EXPORT_SYMBOL_GPL(iomap_invalidate_folio); > @@ -559,7 +559,8 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, > return 0; > folio_clear_error(folio); > > - iop = iomap_page_create(iter->inode, folio, iter->flags); > + iop = iomap_iop_alloc(iter->inode, folio, iter->flags); > + > if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1) > return -EAGAIN; > > @@ -1612,7 +1613,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, > struct writeback_control *wbc, struct inode *inode, > struct folio *folio, u64 end_pos) > { > - struct iomap_page *iop = iomap_page_create(inode, folio, 0); > + struct iomap_page *iop = iomap_iop_alloc(inode, folio, 0); > struct iomap_ioend *ioend, *next; > unsigned len = i_blocksize(inode); > unsigned nblocks = i_blocks_per_folio(inode, folio); > -- > 2.40.1 >
"Darrick J. Wong" <djwong@kernel.org> writes: > On Mon, Jun 05, 2023 at 04:25:01PM +0530, Ritesh Harjani (IBM) wrote: >> This patch renames the iomap_page_create/release() functions to >> iomap_iop_alloc/free() calls. Later patches adds more functions for >> handling iop structure with iomap_iop_** naming conventions. >> Hence iomap_iop_alloc/free() makes more sense to be consistent with all >> APIs. >> >> Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> >> --- >> fs/iomap/buffered-io.c | 21 +++++++++++---------- >> 1 file changed, 11 insertions(+), 10 deletions(-) >> >> diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c >> index 063133ec77f4..4567bdd4fff9 100644 >> --- a/fs/iomap/buffered-io.c >> +++ b/fs/iomap/buffered-io.c >> @@ -43,8 +43,8 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio) >> >> static struct bio_set iomap_ioend_bioset; >> >> -static struct iomap_page * >> -iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) >> +static struct iomap_page *iomap_iop_alloc(struct inode *inode, > > Personally I preferred iop_alloc, but as I wasn't around to make to that > point during the v6 review I'll let this slide. iomap_iop_* it is. > > (I invoke maintainer privilege and will rename the structure to > iomap_folio and iop->iof since the objects no longer track /only/ a > single page state.) Darrick, Do you want me to rename iomap_page -> iomap_folio in this patch itself or would you rather prefer the renaming of iomap_page -> iomap_folio and iop -> iof as a separate last patch in the series? > > Reviewed-by: Darrick J. Wong <djwong@kernel.org> > Thanks! -ritesh
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 063133ec77f4..4567bdd4fff9 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -43,8 +43,8 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio) static struct bio_set iomap_ioend_bioset; -static struct iomap_page * -iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) +static struct iomap_page *iomap_iop_alloc(struct inode *inode, + struct folio *folio, unsigned int flags) { struct iomap_page *iop = to_iomap_page(folio); unsigned int nr_blocks = i_blocks_per_folio(inode, folio); @@ -69,7 +69,7 @@ iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags) return iop; } -static void iomap_page_release(struct folio *folio) +static void iomap_iop_free(struct folio *folio) { struct iomap_page *iop = folio_detach_private(folio); struct inode *inode = folio->mapping->host; @@ -231,7 +231,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter, if (WARN_ON_ONCE(size > iomap->length)) return -EIO; if (offset > 0) - iop = iomap_page_create(iter->inode, folio, iter->flags); + iop = iomap_iop_alloc(iter->inode, folio, iter->flags); else iop = to_iomap_page(folio); @@ -269,7 +269,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter, return iomap_read_inline_data(iter, folio); /* zero post-eof blocks as the page may be mapped */ - iop = iomap_page_create(iter->inode, folio, iter->flags); + iop = iomap_iop_alloc(iter->inode, folio, iter->flags); iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen); if (plen == 0) goto done; @@ -490,7 +490,7 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags) */ if (folio_test_dirty(folio) || folio_test_writeback(folio)) return false; - iomap_page_release(folio); + iomap_iop_free(folio); return true; } EXPORT_SYMBOL_GPL(iomap_release_folio); @@ -507,12 +507,12 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len) if 
(offset == 0 && len == folio_size(folio)) { WARN_ON_ONCE(folio_test_writeback(folio)); folio_cancel_dirty(folio); - iomap_page_release(folio); + iomap_iop_free(folio); } else if (folio_test_large(folio)) { /* Must release the iop so the page can be split */ WARN_ON_ONCE(!folio_test_uptodate(folio) && folio_test_dirty(folio)); - iomap_page_release(folio); + iomap_iop_free(folio); } } EXPORT_SYMBOL_GPL(iomap_invalidate_folio); @@ -559,7 +559,8 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos, return 0; folio_clear_error(folio); - iop = iomap_page_create(iter->inode, folio, iter->flags); + iop = iomap_iop_alloc(iter->inode, folio, iter->flags); + if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1) return -EAGAIN; @@ -1612,7 +1613,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, struct writeback_control *wbc, struct inode *inode, struct folio *folio, u64 end_pos) { - struct iomap_page *iop = iomap_page_create(inode, folio, 0); + struct iomap_page *iop = iomap_iop_alloc(inode, folio, 0); struct iomap_ioend *ioend, *next; unsigned len = i_blocksize(inode); unsigned nblocks = i_blocks_per_folio(inode, folio);
This patch renames the iomap_page_create/release() functions to iomap_iop_alloc/free() calls. Later patches add more functions for handling the iop structure with iomap_iop_** naming conventions. Hence iomap_iop_alloc/free() make more sense and are consistent with all APIs. Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> --- fs/iomap/buffered-io.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-)