[4/6] btrfs: move dropping the bg reference out of submit_eb_page

Message ID: 20230515192256.29006-5-hch@lst.de
State: New, archived
Series: [1/6] btrfs: use a linked list for tracking per-transaction/log dirty buffers

Commit Message

Christoph Hellwig May 15, 2023, 7:22 p.m. UTC
Instead of putting the cached bg for zoned metadata writes in
submit_eb_page, let the btrfs_revert_meta_write_pointer and
btrfs_schedule_zone_finish_bg helpers consume it.  This mirrors how the
reference to it is acquired in btrfs_check_meta_write_pointer and
isolates the extent_buffer writeback code from some of the zoned
device implementation details.  It also avoids a reference roundtrip
for the case where btrfs_schedule_zone_finish_bg actually schedules
a zone_finish command.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/extent_io.c | 3 ---
 fs/btrfs/zoned.c     | 7 +++++--
 2 files changed, 5 insertions(+), 5 deletions(-)
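
The ownership rule the patch adopts can be shown as a minimal userspace
sketch (bg_ref, bg_put, finish_bg and workfn are hypothetical stand-ins,
not btrfs APIs): a helper that is handed a counted reference drops it on
every early-return path, and on the scheduling path passes it on to the
work item instead of taking an extra reference:

#include <stdlib.h>

struct bg_ref {
	int refcount;
};

static void bg_put(struct bg_ref *bg)
{
	/* free on the last reference, like btrfs_put_block_group() */
	if (--bg->refcount == 0)
		free(bg);
}

/* stands in for the deferred work that eventually runs */
static void workfn(struct bg_ref *bg)
{
	/* ... the zone finish would happen here ... */
	bg_put(bg);	/* drop the reference carried by the work */
}

/* consumes the caller's reference on every path */
static void finish_bg(struct bg_ref *bg, int need_finish)
{
	if (!need_finish) {
		bg_put(bg);	/* early return: drop it here, not in the caller */
		return;
	}
	/* hand the caller's reference straight to the work: no get/put roundtrip */
	workfn(bg);
}

int main(void)
{
	struct bg_ref *bg = malloc(sizeof(*bg));

	if (!bg)
		return 1;
	bg->refcount = 1;	/* the caller's reference, consumed below */
	finish_bg(bg, 1);
	return 0;
}

After the call the caller never touches the reference again, which is what
the extent_io.c hunk below relies on.
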
Patch

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d5937ed0962d38..1205b3a3147e7d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1955,8 +1955,6 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 
 	if (!lock_extent_buffer_for_io(eb, wbc)) {
 		btrfs_revert_meta_write_pointer(cache, eb);
-		if (cache)
-			btrfs_put_block_group(cache);
 		free_extent_buffer(eb);
 		return 0;
 	}
@@ -1965,7 +1963,6 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
 		 * Implies write in zoned mode. Mark the last eb in a block group.
 		 */
 		btrfs_schedule_zone_finish_bg(cache, eb);
-		btrfs_put_block_group(cache);
 	}
 	write_one_eb(eb, wbc);
 	free_extent_buffer(eb);
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index e4b8134ab70166..eed96ec35052a0 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1752,6 +1752,7 @@ void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
 
 	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
 	cache->meta_write_pointer = eb->start;
+	btrfs_put_block_group(cache);
 }
 
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
@@ -2145,17 +2146,19 @@ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
 				   struct extent_buffer *eb)
 {
 	if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
-	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
+	    eb->start + eb->len * 2 <= bg->start + bg->zone_capacity) {
+		btrfs_put_block_group(bg);
 		return;
+	}
 
 	if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
 		btrfs_err(bg->fs_info, "double scheduling of bg %llu zone finishing",
 			  bg->start);
+		btrfs_put_block_group(bg);
 		return;
 	}
 
 	/* For the work */
-	btrfs_get_block_group(bg);
 	atomic_inc(&eb->refs);
 	bg->last_eb = eb;
 	INIT_WORK(&bg->zone_finish_work, btrfs_zone_finish_endio_workfn);
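
With both helpers consuming the reference, the caller side in
submit_eb_page() reduces to the shape below. This is a paraphrase of the
function after the patch, not verbatim kernel source, and it assumes the
helper's existing NULL/non-zoned early return:

/*
 * cache holds the block group reference taken in
 * btrfs_check_meta_write_pointer(); the two helpers below now own it,
 * so submit_eb_page() never puts it itself.
 */
if (!lock_extent_buffer_for_io(eb, wbc)) {
	/* consumes the reference; bails out early when cache is NULL */
	btrfs_revert_meta_write_pointer(cache, eb);
	free_extent_buffer(eb);
	return 0;
}
if (cache) {
	/* drops the reference or hands it to the zone-finish work */
	btrfs_schedule_zone_finish_bg(cache, eb);
}
write_one_eb(eb, wbc);
free_extent_buffer(eb);
return 1;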