@@ -1788,7 +1788,6 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
{
struct writeback_control *wbc = ctx->wbc;
struct address_space *mapping = page->mapping;
- struct btrfs_block_group *cache = NULL;
struct extent_buffer *eb;
int ret;
@@ -1826,7 +1825,7 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
ctx->eb = eb;
- if (!btrfs_check_meta_write_pointer(eb->fs_info, eb, &cache)) {
+ if (!btrfs_check_meta_write_pointer(eb->fs_info, ctx)) {
/*
* If for_sync, this hole will be filled with
* trasnsaction commit.
@@ -1840,18 +1839,15 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
}
if (!lock_extent_buffer_for_io(eb, wbc)) {
- btrfs_revert_meta_write_pointer(cache, eb);
- if (cache)
- btrfs_put_block_group(cache);
+ btrfs_revert_meta_write_pointer(ctx->block_group, eb);
free_extent_buffer(eb);
return 0;
}
- if (cache) {
+ if (ctx->block_group) {
/*
* Implies write in zoned mode. Mark the last eb in a block group.
*/
- btrfs_schedule_zone_finish_bg(cache, eb);
- btrfs_put_block_group(cache);
+ btrfs_schedule_zone_finish_bg(ctx->block_group, eb);
}
write_one_eb(eb, wbc);
free_extent_buffer(eb);
@@ -1864,6 +1860,7 @@ int btree_write_cache_pages(struct address_space *mapping,
struct btrfs_eb_write_context ctx = {
.wbc = wbc,
.eb = NULL,
+ .block_group = NULL,
};
struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info;
int ret = 0;
@@ -1967,6 +1964,9 @@ int btree_write_cache_pages(struct address_space *mapping,
ret = 0;
if (!ret && BTRFS_FS_ERROR(fs_info))
ret = -EROFS;
+
+ if (ctx.block_group)
+ btrfs_put_block_group(ctx.block_group);
btrfs_zoned_meta_io_unlock(fs_info);
return ret;
}
@@ -96,6 +96,7 @@ struct extent_buffer {
struct btrfs_eb_write_context {
struct writeback_control *wbc;
struct extent_buffer *eb;
+ struct btrfs_block_group *block_group;
};
/*
@@ -1748,30 +1748,35 @@ void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
}
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret)
+ struct btrfs_eb_write_context *ctx)
{
- struct btrfs_block_group *cache;
- bool ret = true;
+ const struct extent_buffer *eb = ctx->eb;
+ struct btrfs_block_group *block_group = ctx->block_group;
if (!btrfs_is_zoned(fs_info))
return true;
- cache = btrfs_lookup_block_group(fs_info, eb->start);
- if (!cache)
- return true;
+ if (block_group) {
+ if (block_group->start > eb->start ||
+ block_group->start + block_group->length <= eb->start) {
+ btrfs_put_block_group(block_group);
+ block_group = NULL;
+ ctx->block_group = NULL;
+ }
+ }
- if (cache->meta_write_pointer != eb->start) {
- btrfs_put_block_group(cache);
- cache = NULL;
- ret = false;
- } else {
- cache->meta_write_pointer = eb->start + eb->len;
+ if (!block_group) {
+ block_group = btrfs_lookup_block_group(fs_info, eb->start);
+ if (!block_group)
+ return true;
+ ctx->block_group = block_group;
}
- *cache_ret = cache;
+ if (block_group->meta_write_pointer != eb->start)
+ return false;
+ block_group->meta_write_pointer = eb->start + eb->len;
- return ret;
+ return true;
}
void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
@@ -59,8 +59,7 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
bool btrfs_use_zone_append(struct btrfs_bio *bbio);
void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret);
+ struct btrfs_eb_write_context *ctx);
void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
struct extent_buffer *eb);
int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
@@ -190,8 +189,7 @@ static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
}
static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
- struct extent_buffer *eb,
- struct btrfs_block_group **cache_ret)
+ struct btrfs_eb_write_context *ctx)
{
return true;
}
For metadata write-out in zoned mode, we call btrfs_check_meta_write_pointer() to check if an extent buffer to be written is aligned to the write pointer. We look up the block group containing the extent buffer for every extent buffer, which takes unnecessary effort as the extent buffers being written are mostly contiguous.

Introduce "block_group" in struct btrfs_eb_write_context to cache the block group we are working on. Also, while at it, rename "cache" to "block_group".

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/extent_io.c | 16 ++++++++--------
 fs/btrfs/extent_io.h |  1 +
 fs/btrfs/zoned.c     | 35 ++++++++++++++++++++---------------
 fs/btrfs/zoned.h     |  6 ++----
 4 files changed, 31 insertions(+), 27 deletions(-)
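
Note after the diffstat, for readers who want the caching idea in isolation: below is a minimal, self-contained user-space sketch of the pattern the patch introduces in btrfs_check_meta_write_pointer(). It is an illustration under simplifying assumptions, not kernel code; struct block_group here, and the bg_lookup()/bg_put() helpers, are hypothetical stand-ins for the real btrfs structures and for btrfs_lookup_block_group()/btrfs_put_block_group().

#include <stdbool.h>
#include <stdio.h>

struct block_group {
	unsigned long long start;
	unsigned long long length;
	unsigned long long meta_write_pointer;
	int refs;
};

struct eb_write_context {
	struct block_group *block_group;	/* cached across extent buffers */
};

/* Two toy block groups; bg_lookup()/bg_put() stand in for the real lookup/put. */
static struct block_group groups[2] = {
	{ .start = 0,           .length = 1024 * 1024, .meta_write_pointer = 0 },
	{ .start = 1024 * 1024, .length = 1024 * 1024, .meta_write_pointer = 1024 * 1024 },
};

static struct block_group *bg_lookup(unsigned long long addr)
{
	for (int i = 0; i < 2; i++) {
		if (addr >= groups[i].start &&
		    addr < groups[i].start + groups[i].length) {
			groups[i].refs++;
			return &groups[i];
		}
	}
	return NULL;
}

static void bg_put(struct block_group *bg)
{
	bg->refs--;
}

/*
 * Return true if the buffer at @start/@len sits exactly at the zone's write
 * pointer. Reuse the block group cached in @ctx whenever @start still falls
 * inside its range, so contiguous buffers skip the lookup entirely.
 */
static bool check_meta_write_pointer(struct eb_write_context *ctx,
				     unsigned long long start,
				     unsigned long long len)
{
	struct block_group *bg = ctx->block_group;

	/* Drop the cached block group once the buffer leaves its range. */
	if (bg && (bg->start > start || bg->start + bg->length <= start)) {
		bg_put(bg);
		bg = NULL;
		ctx->block_group = NULL;
	}

	/* Look up (and cache) a block group only when none is cached. */
	if (!bg) {
		bg = bg_lookup(start);
		if (!bg)
			return true;
		ctx->block_group = bg;
	}

	if (bg->meta_write_pointer != start)
		return false;

	bg->meta_write_pointer = start + len;
	return true;
}

int main(void)
{
	struct eb_write_context ctx = { .block_group = NULL };

	/* Two contiguous 16K buffers: the second reuses the cached group. */
	printf("first:  %d\n", check_meta_write_pointer(&ctx, 0, 16384));
	printf("second: %d\n", check_meta_write_pointer(&ctx, 16384, 16384));

	/* Mirrors the final put added at the end of btree_write_cache_pages(). */
	if (ctx.block_group)
		bg_put(ctx.block_group);
	return 0;
}

In this sketch the second, contiguous buffer hits the cached range and never calls bg_lookup() again, which is the per-buffer lookup cost the patch removes.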