@@ -2540,6 +2540,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 to_free, to_unusable;
 	const int bg_reclaim_threshold = READ_ONCE(fs_info->bg_reclaim_threshold);
 	bool initial = (size == block_group->length);
+	u64 reclaimable_unusable;
 
 	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 
@@ -2570,12 +2571,15 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		spin_unlock(&block_group->lock);
 	}
 
+	reclaimable_unusable = block_group->zone_unusable -
+			       (block_group->length - block_group->zone_capacity);
 	/* All the region is now unusable. Mark it as unused and reclaim */
 	if (block_group->zone_unusable == block_group->length) {
 		btrfs_mark_bg_unused(block_group);
 	} else if (bg_reclaim_threshold &&
-		   block_group->zone_unusable >=
-		   div_factor_fine(block_group->length, bg_reclaim_threshold)) {
+		   reclaimable_unusable >=
+		   div_factor_fine(block_group->zone_capacity,
+				   bg_reclaim_threshold)) {
 		btrfs_mark_bg_to_reclaim(block_group);
 	}
 
With the introduction of zone capacity, the bytes in [capacity, length] are
always zone unusable. Counting this region as a reclaim target causes block
groups to be reclaimed too early. Instead, reclaim block groups based on the
bytes that become usable again after a zone reset.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/free-space-cache.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
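
To illustrate the effect of the new condition, below is a minimal userspace
sketch with hypothetical numbers (the 256 MiB / 192 MiB zone geometry and the
75% threshold are illustrative assumptions, not values from the patch;
div_factor_fine() is reimplemented here after the btrfs helper, which
computes num * factor / 100):

/*
 * Minimal userspace sketch (not part of the patch) showing why the old
 * condition reclaims too early. All numbers are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

/* Modeled on the btrfs helper: factor is a percentage. */
static uint64_t div_factor_fine(uint64_t num, int factor)
{
	return num * factor / 100;
}

int main(void)
{
	const uint64_t MiB = 1024 * 1024;

	/* Hypothetical zoned block group: 256 MiB zone, 192 MiB capacity. */
	uint64_t length = 256 * MiB;
	uint64_t zone_capacity = 192 * MiB;
	int bg_reclaim_threshold = 75;	/* percent */

	/*
	 * The tail [capacity, length) contributes 64 MiB of permanently
	 * unusable bytes; assume another 136 MiB of freed-but-unresettable
	 * space on top of that, for 200 MiB of zone_unusable in total.
	 */
	uint64_t zone_unusable = (length - zone_capacity) + 136 * MiB;

	/* Old check: the dead 64 MiB tail counts toward the threshold. */
	int old_fires = zone_unusable >=
			div_factor_fine(length, bg_reclaim_threshold);

	/* New check: only bytes a zone reset can recover count. */
	uint64_t reclaimable_unusable =
		zone_unusable - (length - zone_capacity);
	int new_fires = reclaimable_unusable >=
			div_factor_fine(zone_capacity, bg_reclaim_threshold);

	printf("old: %d (200 MiB >= 192 MiB), new: %d (136 MiB < 144 MiB)\n",
	       old_fires, new_fires);
	return 0;
}

With these numbers the old check fires even though only about 71% of the
actually usable 192 MiB is reclaimable waste, below the 75% threshold that
the new check enforces against zone_capacity.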