
[RFCv5,1/5] iomap: Rename iomap_page_create/release() to iop_alloc/free()

Message ID 03639dbe54a0a0ef2bd789f4e8318df22a4c5d12.1683485700.git.ritesh.list@gmail.com (mailing list archive)
State Mainlined, archived
Series iomap: Add support for per-block dirty state to improve write performance

Commit Message

Ritesh Harjani (IBM) May 7, 2023, 7:27 p.m. UTC
This patch renames the iomap_page_create/release() functions to
iop_alloc/free(). Later patches add more functions for handling the
iop structure with the iop_** naming convention, so iop_alloc/free()
makes more sense.

Note that this patch also moves folio_detach_private() to happen
later, after the bitmap_full() check. This is another small refactor:
later patches will convert the bitmap_** helpers into iop_** helpers
that take only a folio, so folio_detach_private() has to move to the
end, just before kfree(iop).

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
---
 fs/iomap/buffered-io.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
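
For reference, with the detach reordered, the release path after this
patch looks roughly as follows (assembled from the diff below plus the
surrounding context in fs/iomap/buffered-io.c; a sketch for readers,
not an extra hunk of the patch):

static void iop_free(struct folio *folio)
{
	struct iomap_page *iop = to_iomap_page(folio);
	struct inode *inode = folio->mapping->host;
	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);

	if (!iop)
		return;
	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
			folio_test_uptodate(folio));
	/*
	 * Detach only after the checks above, since later patches turn
	 * those checks into iop_** helpers that take only a folio.
	 */
	folio_detach_private(folio);
	kfree(iop);
}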

Comments

Christoph Hellwig May 18, 2023, 6:13 a.m. UTC | #1
On Mon, May 08, 2023 at 12:57:56AM +0530, Ritesh Harjani (IBM) wrote:
> This patch renames the iomap_page_create/release() functions to
> iop_alloc/free(). Later patches add more functions for handling the
> iop structure with the iop_** naming convention, so iop_alloc/free()
> makes more sense.

I can't say I like the iop_* naming all that much, especially as it
is very generic and we use an iomap_ prefix everywhere else.

> Note that this patch also moves folio_detach_private() to happen
> later, after the bitmap_full() check. This is another small refactor:
> later patches will convert the bitmap_** helpers into iop_** helpers
> that take only a folio, so folio_detach_private() has to move to the
> end, just before kfree(iop).

Please don't mix renames and code movements.
Ritesh Harjani (IBM) May 19, 2023, 3:01 p.m. UTC | #2
Christoph Hellwig <hch@infradead.org> writes:

> On Mon, May 08, 2023 at 12:57:56AM +0530, Ritesh Harjani (IBM) wrote:
>> This patch renames the iomap_page_create/release() functions to
>> iop_alloc/free(). Later patches add more functions for handling the
>> iop structure with the iop_** naming convention, so iop_alloc/free()
>> makes more sense.
>
> I can't say I like the iop_* naming all that much, especially as it
> is very generic and we use an iomap_ prefix everywhere else.
>

I can add an iomap_ prefix, so it will then become
iomap_iop_alloc()/iomap_iop_free(). All the other helpers will then
look like:
- iomap_iop_set_range_uptodate()
- iomap_iop_set_range_dirty()
- ...

The 'iomap_iop' prefix just helps distinguish all the APIs that
operate on the iomap_page (iop) structure.
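
As a rough sketch, the renamed API would then read like this
(signatures inferred from this patch's iop_alloc() and the existing
iomap_set_range_uptodate(); the dirty-tracking helper does not exist
yet and is hypothetical until the later patches in the series):

/* Prospective iomap_iop_* naming; signatures are assumptions. */
static struct iomap_page *iomap_iop_alloc(struct inode *inode,
		struct folio *folio, unsigned int flags);
static void iomap_iop_free(struct folio *folio);
static void iomap_iop_set_range_uptodate(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len);
static void iomap_iop_set_range_dirty(struct folio *folio,
		struct iomap_page *iop, size_t off, size_t len);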

>> Note that this patch also moves folio_detach_private() to happen
>> later, after the bitmap_full() check. This is another small refactor:
>> later patches will convert the bitmap_** helpers into iop_** helpers
>> that take only a folio, so folio_detach_private() has to move to the
>> end, just before kfree(iop).
>
> Please don't mix renames and code movements.

Sure. Will take care of it in the next rev.

Patch

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 6f4c97a6d7e9..cbd945d96584 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -43,8 +43,8 @@  static inline struct iomap_page *to_iomap_page(struct folio *folio)
 
 static struct bio_set iomap_ioend_bioset;
 
-static struct iomap_page *
-iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
+static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
+				    unsigned int flags)
 {
 	struct iomap_page *iop = to_iomap_page(folio);
 	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
@@ -69,9 +69,9 @@  iomap_page_create(struct inode *inode, struct folio *folio, unsigned int flags)
 	return iop;
 }
 
-static void iomap_page_release(struct folio *folio)
+static void iop_free(struct folio *folio)
 {
-	struct iomap_page *iop = folio_detach_private(folio);
+	struct iomap_page *iop = to_iomap_page(folio);
 	struct inode *inode = folio->mapping->host;
 	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
 
@@ -81,6 +81,7 @@  static void iomap_page_release(struct folio *folio)
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
 	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
 			folio_test_uptodate(folio));
+	folio_detach_private(folio);
 	kfree(iop);
 }
 
@@ -231,7 +232,7 @@  static int iomap_read_inline_data(const struct iomap_iter *iter,
 	if (WARN_ON_ONCE(size > iomap->length))
 		return -EIO;
 	if (offset > 0)
-		iop = iomap_page_create(iter->inode, folio, iter->flags);
+		iop = iop_alloc(iter->inode, folio, iter->flags);
 	else
 		iop = to_iomap_page(folio);
 
@@ -269,7 +270,7 @@  static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
 		return iomap_read_inline_data(iter, folio);
 
 	/* zero post-eof blocks as the page may be mapped */
-	iop = iomap_page_create(iter->inode, folio, iter->flags);
+	iop = iop_alloc(iter->inode, folio, iter->flags);
 	iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
 	if (plen == 0)
 		goto done;
@@ -497,7 +498,7 @@  bool iomap_release_folio(struct folio *folio, gfp_t gfp_flags)
 	 */
 	if (folio_test_dirty(folio) || folio_test_writeback(folio))
 		return false;
-	iomap_page_release(folio);
+	iop_free(folio);
 	return true;
 }
 EXPORT_SYMBOL_GPL(iomap_release_folio);
@@ -514,12 +515,12 @@  void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
 	if (offset == 0 && len == folio_size(folio)) {
 		WARN_ON_ONCE(folio_test_writeback(folio));
 		folio_cancel_dirty(folio);
-		iomap_page_release(folio);
+		iop_free(folio);
 	} else if (folio_test_large(folio)) {
 		/* Must release the iop so the page can be split */
 		WARN_ON_ONCE(!folio_test_uptodate(folio) &&
 			     folio_test_dirty(folio));
-		iomap_page_release(folio);
+		iop_free(folio);
 	}
 }
 EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
@@ -566,7 +567,8 @@  static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 		return 0;
 	folio_clear_error(folio);
 
-	iop = iomap_page_create(iter->inode, folio, iter->flags);
+	iop = iop_alloc(iter->inode, folio, iter->flags);
+
 	if ((iter->flags & IOMAP_NOWAIT) && !iop && nr_blocks > 1)
 		return -EAGAIN;
 
@@ -1619,7 +1621,7 @@  iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 		struct writeback_control *wbc, struct inode *inode,
 		struct folio *folio, u64 end_pos)
 {
-	struct iomap_page *iop = iomap_page_create(inode, folio, 0);
+	struct iomap_page *iop = iop_alloc(inode, folio, 0);
 	struct iomap_ioend *ioend, *next;
 	unsigned len = i_blocksize(inode);
 	unsigned nblocks = i_blocks_per_folio(inode, folio);
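
For context, the iomap_page structure that these helpers allocate and
free looked like this at the time of the series (copied from
fs/iomap/buffered-io.c for readers; not part of the patch):

/*
 * Per-folio state: tracks per-block uptodate bits and the number of
 * read/write bytes still in flight for the folio.
 */
struct iomap_page {
	atomic_t		read_bytes_pending;
	atomic_t		write_bytes_pending;
	spinlock_t		uptodate_lock;
	unsigned long		uptodate[];
};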