@@ -1759,12 +1759,39 @@ static void prepare_eb_write(struct extent_buffer *eb)
}
}
-static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
- struct writeback_control *wbc)
+static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
+ struct writeback_control *wbc,
+ struct extent_buffer **eb_context)
{
struct btrfs_fs_info *fs_info = eb->fs_info;
+ struct btrfs_block_group *zoned_bg = NULL;
struct btrfs_bio *bbio;
+ if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &zoned_bg)) {
+ /*
+ * If for_sync, this hole will be filled with
+ * transaction commit.
+ */
+ if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
+ return -EAGAIN;
+ return 0;
+ }
+
+ if (eb_context)
+ *eb_context = eb;
+
+ if (!lock_extent_buffer_for_io(eb, wbc)) {
+ btrfs_revert_meta_write_pointer(zoned_bg, eb);
+ return 0;
+ }
+
+ /*
+ * A non-NULL zoned_bg implies zoned mode and that we are writing the
+ * last possible block in the zone.
+ */
+ if (zoned_bg)
+ btrfs_schedule_zone_finish_bg(zoned_bg, eb);
+
prepare_eb_write(eb);
bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES,
@@ -1801,6 +1828,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb,
}
}
btrfs_submit_bio(bbio, 0);
+ return 1;
}
/*
@@ -1868,11 +1896,8 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc)
*/
if (!eb)
continue;
-
- if (lock_extent_buffer_for_io(eb, wbc)) {
- write_one_eb(eb, wbc);
+ if (write_one_eb(eb, wbc, NULL) > 0)
submitted++;
- }
free_extent_buffer(eb);
}
return submitted;
@@ -1902,7 +1927,6 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
struct extent_buffer **eb_context)
{
struct address_space *mapping = page->mapping;
- struct btrfs_block_group *cache = NULL;
struct extent_buffer *eb;
int ret;
@@ -1937,36 +1961,9 @@ static int submit_eb_page(struct page *page, struct writeback_control *wbc,
spin_unlock(&mapping->private_lock);
if (!ret)
return 0;
-
- if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
- /*
- * If for_sync, this hole will be filled with
- * trasnsaction commit.
- */
- if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
- ret = -EAGAIN;
- else
- ret = 0;
- free_extent_buffer(eb);
- return ret;
- }
-
- *eb_context = eb;
-
- if (!lock_extent_buffer_for_io(eb, wbc)) {
- btrfs_revert_meta_write_pointer(cache, eb);
- free_extent_buffer(eb);
- return 0;
- }
- if (cache) {
- /*
- * Implies write in zoned mode. Mark the last eb in a block group.
- */
- btrfs_schedule_zone_finish_bg(cache, eb);
- }
- write_one_eb(eb, wbc);
+ ret = write_one_eb(eb, wbc, eb_context);
free_extent_buffer(eb);
- return 1;
+ return ret;
}
int btree_write_cache_pages(struct address_space *mapping,
Currently only submit_eb_page contains handling for checking against the
write pointer on zoned devices.  This is fine as there is no support for
block size < PAGE_SIZE with zoned devices at the moment, but it prevents
us from easily adding new callers of write_one_eb without breaking zoned
device support.

Move the call to lock_extent_buffer_for_io as well as the write pointer
check and related code into write_one_eb.  To enable this, write_one_eb
now sets the eb_context pointer if the caller passed in a non-NULL
argument.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/btrfs/extent_io.c | 69 +++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 36 deletions(-)
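
For illustration only, here is a minimal sketch of what a new caller of
write_one_eb() could look like after this change.  The helper name
submit_eb_array and its loop are hypothetical and not part of this patch;
only the return-value contract (> 0 eb locked and submitted, 0 eb skipped,
< 0 error such as -EAGAIN from the zoned write pointer check) follows from
the code above:

/*
 * Hypothetical example, not part of this patch: with the zoned write
 * pointer check and lock_extent_buffer_for_io() folded into
 * write_one_eb(), a new caller only has to interpret the return value.
 */
static int submit_eb_array(struct extent_buffer **ebs, int nr_ebs,
			   struct writeback_control *wbc)
{
	struct extent_buffer *eb_context = NULL;
	int submitted = 0;
	int i;

	for (i = 0; i < nr_ebs; i++) {
		int ret = write_one_eb(ebs[i], wbc, &eb_context);

		if (ret < 0)
			return ret;	/* e.g. -EAGAIN past the write pointer */
		if (ret > 0)
			submitted++;	/* eb was locked and its bio submitted */
		/* ret == 0: eb was skipped, keep going */
	}
	return submitted;
}

With this contract, callers such as submit_eb_subpage and submit_eb_page
reduce to thin wrappers and do not need to duplicate any zoned-specific
logic.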