diff mbox series

[5/8] btrfs: simplify btrfs_clear_buffer_dirty()

Message ID 9d46f48d978dd85977e3e67bcfc74574ac448333.1738127135.git.wqu@suse.com (mailing list archive)
State New
Headers show
Series btrfs: separate/simplify/unify subpage handling | expand

Commit Message

Qu Wenruo Jan. 29, 2025, 7:37 a.m. UTC
The function btrfs_clear_buffer_dirty() is called on a dirty extent buffer
that will not be written back.

In that case, we call btrfs_clear_buffer_dirty() to manually clear the
PAGECACHE_TAG_DIRTY flag.

But PAGECACHE_TAG_DIRTY is normally cleared by folio_start_writeback()
if the page is no longer dirty.
And for data folios, if we need to clear the dirty flag without writing
the folio back, we just call folio_start_writeback() followed immediately
by folio_end_writeback().

So here we can simplify the function by:

- Use the newly introduced btrfs_meta_folio_clear_dirty() helper
  so that we do not need to handle subpage metadata separately.

- Call btrfs_meta_folio_set/clear_writeback() to clear PAGECACHE_TAG_DIRTY,
  instead of manually clearing the tag for the folio.

- Update the comment inside set_extent_buffer_dirty(),
  as there is no separate clear_subpage_extent_buffer_dirty() anymore.

Signed-off-by: Qu Wenruo <wqu@suse.com>
---
 fs/btrfs/extent_io.c | 45 ++++++++++----------------------------------
 1 file changed, 10 insertions(+), 35 deletions(-)
diff mbox series

Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8da1da43aa74..e4261dce2e31 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3319,38 +3319,11 @@  void free_extent_buffer_stale(struct extent_buffer *eb)
 	release_extent_buffer(eb);
 }
 
-static void btree_clear_folio_dirty(struct folio *folio)
-{
-	ASSERT(folio_test_dirty(folio));
-	ASSERT(folio_test_locked(folio));
-	folio_clear_dirty_for_io(folio);
-	xa_lock_irq(&folio->mapping->i_pages);
-	if (!folio_test_dirty(folio))
-		__xa_clear_mark(&folio->mapping->i_pages,
-				folio_index(folio), PAGECACHE_TAG_DIRTY);
-	xa_unlock_irq(&folio->mapping->i_pages);
-}
-
-static void clear_subpage_extent_buffer_dirty(const struct extent_buffer *eb)
-{
-	struct btrfs_fs_info *fs_info = eb->fs_info;
-	struct folio *folio = eb->folios[0];
-	bool last;
-
-	/* btree_clear_folio_dirty() needs page locked. */
-	folio_lock(folio);
-	last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
-	if (last)
-		btree_clear_folio_dirty(folio);
-	folio_unlock(folio);
-	WARN_ON(atomic_read(&eb->refs) == 0);
-}
-
 void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
 			      struct extent_buffer *eb)
 {
 	struct btrfs_fs_info *fs_info = eb->fs_info;
-	int num_folios;
+	const int num_folios = num_extent_folios(eb);
 
 	btrfs_assert_tree_write_locked(eb);
 
@@ -3377,17 +3350,19 @@  void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans,
 	percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len,
 				 fs_info->dirty_metadata_batch);
 
-	if (btrfs_meta_is_subpage(fs_info))
-		return clear_subpage_extent_buffer_dirty(eb);
-
-	num_folios = num_extent_folios(eb);
 	for (int i = 0; i < num_folios; i++) {
 		struct folio *folio = eb->folios[i];
 
 		if (!folio_test_dirty(folio))
 			continue;
 		folio_lock(folio);
-		btree_clear_folio_dirty(folio);
+		btrfs_meta_folio_clear_dirty(fs_info, folio, eb->start, eb->len);
+		/*
+		 * The set and clear writeback is to properly clear
+		 * PAGECACHE_TAG_DIRTY.
+		 */
+		btrfs_meta_folio_set_writeback(fs_info, folio, eb->start, eb->len);
+		btrfs_meta_folio_clear_writeback(fs_info, folio, eb->start, eb->len);
 		folio_unlock(folio);
 	}
 	WARN_ON(atomic_read(&eb->refs) == 0);
@@ -3412,12 +3387,12 @@  void set_extent_buffer_dirty(struct extent_buffer *eb)
 
 		/*
 		 * For subpage case, we can have other extent buffers in the
-		 * same page, and in clear_subpage_extent_buffer_dirty() we
+		 * same page, and in clear_extent_buffer_dirty() we
 		 * have to clear page dirty without subpage lock held.
 		 * This can cause race where our page gets dirty cleared after
 		 * we just set it.
 		 *
-		 * Thankfully, clear_subpage_extent_buffer_dirty() has locked
+		 * Thankfully, clear_extent_buffer_dirty() has locked
 		 * its page for other reasons, we can use page lock to prevent
 		 * the above race.
 		 */