
[06/12] mm/truncate: add folio_unmap_invalidate() helper

Message ID 20241203153232.92224-8-axboe@kernel.dk (mailing list archive)
State New
Series Uncached buffered IO

Commit Message

Jens Axboe Dec. 3, 2024, 3:31 p.m. UTC
Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
given folio. The caller must already have locked the folio. Use this
new helper in invalidate_inode_pages2_range(), rather than duplicate
the code there.

In preparation for using this elsewhere as well, have it take a gfp_t
mask rather than assume GFP_KERNEL is the right choice. This bubbles
back to invalidate_complete_folio2() as well.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 include/linux/pagemap.h |  2 ++
 mm/truncate.c           | 35 ++++++++++++++++++++++-------------
 2 files changed, 24 insertions(+), 13 deletions(-)
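
For context, a minimal sketch of how a caller might use the new helper is shown below; the function name and surrounding flow are illustrative only and not part of the patch. It mirrors the invalidate_inode_pages2_range() call site: the folio must be locked, writeback must have completed, and the gfp mask is passed through to filemap_release_folio().

/*
 * Illustrative only, not part of the patch: shows the calling
 * convention for folio_unmap_invalidate(). The folio must be locked
 * and not under writeback when the helper is called.
 */
static int example_drop_folio(struct address_space *mapping,
			      struct folio *folio)
{
	int ret;

	folio_lock(folio);
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		return 0;
	}
	folio_wait_writeback(folio);
	/* 0 on success (or if the folio is dirty), negative errno on failure */
	ret = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
	folio_unlock(folio);
	return ret;
}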

Comments

Christoph Hellwig Dec. 10, 2024, 11:21 a.m. UTC | #1
On Tue, Dec 03, 2024 at 08:31:42AM -0700, Jens Axboe wrote:
> Add a folio_unmap_invalidate() helper, which unmaps and invalidates a
> given folio. The caller must already have locked the folio. Use this
> new helper in invalidate_inode_pages2_range(), rather than duplicate
> the code there.

This new helper ends up being the only caller of invalidate_complete_folio2,
so you might as well merge the two instead of having yet another
invalidate/unmap helper, as these are getting impossible to keep track of.

Also, it is only used in mm/, so add the prototype to mm/internal.h
instead of the public pagemap.h.  And a little comment on what the
function does would be pretty useful as well.

> In preparation for using this elsewhere as well, have it take a gfp_t
> mask rather than assume GFP_KERNEL is the right choice. This bubbles
> back to invalidate_complete_folio2() as well.

Looking at the callers, the gfp_t looks a bit odd to me, as it is
either GFP_KERNEL or 0, which is a valid but rather unusable gfp_t
value, but I guess this comes from filemap_release_folio, which
works similarly.
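
The merge Christoph suggests might look roughly like the sketch below. This is not part of the series; only the parts of invalidate_complete_folio2() visible in the hunk further down are spelled out, and the rest of its body (the locked removal from the page cache) would be folded in where noted.

/*
 * Rough sketch of the suggested merge, not part of this series:
 * invalidate_complete_folio2() is folded into the helper so a single
 * invalidate/unmap entry point remains, prototyped in mm/internal.h.
 */
int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
			   gfp_t gfp)
{
	int ret;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);

	if (folio_test_dirty(folio))
		return 0;
	if (folio_mapped(folio))
		unmap_mapping_folio(folio);
	BUG_ON(folio_mapped(folio));

	ret = folio_launder(mapping, folio);
	if (ret)
		return ret;
	if (folio->mapping != mapping)
		return -EBUSY;
	if (!filemap_release_folio(folio, gfp))
		return -EBUSY;

	/*
	 * Remainder of invalidate_complete_folio2() goes here: take
	 * mapping->host->i_lock and the i_pages lock, remove the folio
	 * from the page cache, and return -EBUSY if that fails.
	 */
	return 0;
}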

Patch

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 72b03b37c265..f2d49dccb7c1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -34,6 +34,8 @@  int kiocb_invalidate_pages(struct kiocb *iocb, size_t count);
 void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count);
 int filemap_invalidate_pages(struct address_space *mapping,
 			     loff_t pos, loff_t end, bool nowait);
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp);
 
 int write_inode_now(struct inode *, int sync);
 int filemap_fdatawrite(struct address_space *);
diff --git a/mm/truncate.c b/mm/truncate.c
index 7c304d2f0052..c1dfddb1122a 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -533,12 +533,12 @@  EXPORT_SYMBOL(invalidate_mapping_pages);
  * sitting in the folio_add_lru() caches.
  */
 static int invalidate_complete_folio2(struct address_space *mapping,
-					struct folio *folio)
+				      struct folio *folio, gfp_t gfp_mask)
 {
 	if (folio->mapping != mapping)
 		return 0;
 
-	if (!filemap_release_folio(folio, GFP_KERNEL))
+	if (!filemap_release_folio(folio, gfp_mask))
 		return 0;
 
 	spin_lock(&mapping->host->i_lock);
@@ -570,6 +570,25 @@  static int folio_launder(struct address_space *mapping, struct folio *folio)
 	return mapping->a_ops->launder_folio(folio);
 }
 
+int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
+			   gfp_t gfp)
+{
+	int ret;
+
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+
+	if (folio_test_dirty(folio))
+		return 0;
+	if (folio_mapped(folio))
+		unmap_mapping_folio(folio);
+	BUG_ON(folio_mapped(folio));
+
+	ret = folio_launder(mapping, folio);
+	if (!ret && !invalidate_complete_folio2(mapping, folio, gfp))
+		return -EBUSY;
+	return ret;
+}
+
 /**
  * invalidate_inode_pages2_range - remove range of pages from an address_space
  * @mapping: the address_space
@@ -629,18 +648,8 @@  int invalidate_inode_pages2_range(struct address_space *mapping,
 				folio_unlock(folio);
 				continue;
 			}
-			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
-
-			if (folio_mapped(folio))
-				unmap_mapping_folio(folio);
-			BUG_ON(folio_mapped(folio));
-
-			ret2 = folio_launder(mapping, folio);
-			if (ret2 == 0) {
-				if (!invalidate_complete_folio2(mapping, folio))
-					ret2 = -EBUSY;
-			}
+			ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL);
 			if (ret2 < 0)
 				ret = ret2;
 			folio_unlock(folio);