@@ -43,8 +43,8 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
static struct bio_set iomap_ioend_bioset;
-static struct iomap_page *
-iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
+static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
+ unsigned int flags)
{
struct iomap_page *iop = to_iomap_page(folio);
unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
@@ -69,9 +69,9 @@ iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
return iop;
}
-static void iomap_page_release(struct folio *folio)
+static void iop_free(struct folio *folio)
{
- struct iomap_page *iop = folio_detach_private(folio);
+ struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = folio->mapping->host;
unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
@@ -81,6 +81,7 @@ static void iomap_page_release(struct folio *folio)
WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
folio_test_uptodate(folio));
+ folio_detach_private(folio);
kfree(iop);
}
@@ -231,7 +232,7 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
if (offset > 0)
- iop = iomap_page_create(iter->inode, folio, iter->flags);
+ iop = iop_alloc(iter->inode, folio, iter->flags);
else
iop = to_iomap_page(folio);
@@ -269,7 +270,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
return iomap_read_inline_data(iter, folio);
/* zero post-eof blocks as the page may be mapped */
- iop = iomap_page_create(iter->inode, folio, iter->flags);
+ iop = iop_alloc(iter->inode, folio, iter->flags);
iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
@@ -497,7 +498,7 @@ bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
*/
if (folio_test_dirty(folio) || folio_test_writeback(folio))
return false;
- iomap_page_release(folio);
+ iop_free(folio);
return true;
}
EXPORT_SYMBOL_GPL(iomap_release_folio);
@@ -514,12 +515,12 @@ void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
if (offset == 0 && len == folio_size(folio)) {
WARN_ON_ONCE(folio_test_writeback(folio));
folio_cancel_dirty(folio);
- iomap_page_release(folio);
+ iop_free(folio);
} else if (folio_test_large(folio)) {
/* Must release the iop so the page can be split */
WARN_ON_ONCE(!folio_test_uptodate(folio) &&
folio_test_dirty(folio));
- iomap_page_release(folio);
+ iop_free(folio);
}
}
EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
@@ -566,7 +567,8 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
return 0;
folio_clear_error(folio);
- iop = iomap_page_create(iter->inode, folio, iter->flags);
+ iop = iop_alloc(iter->inode, folio, iter->flags);
+
if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
return -EAGAIN;
@@ -1619,7 +1621,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
struct folio *folio, u64 end_pos)
{
- struct iomap_page *iop = iomap_page_create(inode, folio, 0);
+ struct iomap_page *iop = iop_alloc(inode, folio, 0);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
unsigned nblocks = i_blocks_per_folio(inode, folio);
This patch renames the iomap_page_create()/iomap_page_release() functions to iop_alloc()/iop_free(). Later patches add more functions for handling the iop structure with the iop_** naming convention, hence iop_alloc()/iop_free() makes more sense. Note that this patch also moves folio_detach_private() to happen later, after the bitmap_full() check. This is just another small refactor: in later patches we will convert the bitmap_** helpers into iop_** related helpers which will only take a folio, and hence we should move folio_detach_private() to the end, just before calling kfree(iop). Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> --- fs/iomap/buffered-io.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-)