--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1369,8 +1369,12 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_unlock(&block_group->lock);
spin_unlock(&space_info->lock);
- /* DISCARD can flip during remount */
- trimming = btrfs_test_opt(fs_info, DISCARD);
+ /*
+ * DISCARD can flip during remount. In HMZONED mode,
+ * we need to reset sequential required zones.
+ */
+ trimming = btrfs_test_opt(fs_info, DISCARD) ||
+ btrfs_fs_incompat(fs_info, HMZONED);
/* Implicit trim during transaction commit. */
if (trimming)
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -1338,6 +1338,9 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
stripe = bbio->stripes;
for (i = 0; i < bbio->num_stripes; i++, stripe++) {
+ struct btrfs_device *dev = stripe->dev;
+ u64 physical = stripe->physical;
+ u64 length = stripe->length;
u64 bytes;
struct request_queue *req_q;
@@ -1345,14 +1348,18 @@ int btrfs_discard_extent(struct btrfs_fs_info *fs_info, u64 bytenr,
ASSERT(btrfs_test_opt(fs_info, DEGRADED));
continue;
}
+
req_q = bdev_get_queue(stripe->dev->bdev);
- if (!blk_queue_discard(req_q))
+ /* zone reset in HMZONED mode */
+ if (btrfs_can_zone_reset(dev, physical, length))
+ ret = btrfs_reset_device_zone(dev, physical,
+ length, &bytes);
+ else if (blk_queue_discard(req_q))
+ ret = btrfs_issue_discard(dev->bdev, physical,
+ length, &bytes);
+ else
continue;
- ret = btrfs_issue_discard(stripe->dev->bdev,
- stripe->physical,
- stripe->length,
- &bytes);
if (!ret) {
discarded_bytes += bytes;
} else if (ret != -EOPNOTSUPP) {
--- a/fs/btrfs/hmzoned.c
+++ b/fs/btrfs/hmzoned.c
@@ -1120,3 +1120,22 @@ int btrfs_hmzoned_check_metadata_space(struct btrfs_fs_info *fs_info)
return btrfs_commit_transaction(trans);
}
+
+int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
+ u64 length, u64 *bytes)
+{
+ int ret;
+
+ ret = blkdev_reset_zones(device->bdev, physical >> SECTOR_SHIFT,
+ length >> SECTOR_SHIFT, GFP_NOFS);
+ if (!ret) {
+ *bytes = length;
+ while (length) {
+ btrfs_dev_set_zone_empty(device, physical);
+ physical += device->zone_info->zone_size;
+ length -= device->zone_info->zone_size;
+ }
+ }
+
+ return ret;
+}
--- a/fs/btrfs/hmzoned.h
+++ b/fs/btrfs/hmzoned.h
@@ -43,6 +43,8 @@ bool btrfs_check_allocatable_zones(struct btrfs_device *device, u64 pos,
void btrfs_calc_zone_unusable(struct btrfs_block_group *cache);
int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache);
int btrfs_hmzoned_check_metadata_space(struct btrfs_fs_info *fs_info);
+int btrfs_reset_device_zone(struct btrfs_device *device, u64 physical,
+ u64 length, u64 *bytes);
#else /* CONFIG_BLK_DEV_ZONED */
static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
struct blk_zone *zone)
@@ -103,6 +105,11 @@ static inline int btrfs_hmzoned_check_metadata_space(
{
return 0;
}
+static inline int btrfs_reset_device_zone(struct btrfs_device *device,
+ u64 physical, u64 length, u64 *bytes)
+{
+ return 0;
+}
#endif
static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
@@ -189,4 +196,20 @@ static inline u64 btrfs_zone_align(struct btrfs_device *device, u64 pos)
return ALIGN(pos, device->zone_info->zone_size);
}
+static inline bool btrfs_can_zone_reset(struct btrfs_device *device,
+ u64 physical, u64 length)
+{
+ u64 zone_size;
+
+ if (!btrfs_dev_is_sequential(device, physical))
+ return false;
+
+ zone_size = device->zone_info->zone_size;
+ if (!IS_ALIGNED(physical, zone_size) ||
+ !IS_ALIGNED(length, zone_size))
+ return false;
+
+ return true;
+}
+
#endif
For an HMZONED volume, a block group maps to a zone of the device. For
deleted unused block groups, the zone of the block group can be reset to
rewind the zone write pointer to the start of the zone.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/block-group.c |  8 ++++++--
 fs/btrfs/extent-tree.c | 17 ++++++++++++-----
 fs/btrfs/hmzoned.c     | 19 +++++++++++++++++++
 fs/btrfs/hmzoned.h     | 23 +++++++++++++++++++++++
 4 files changed, 60 insertions(+), 7 deletions(-)
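
For reference, a minimal sketch (not part of the patch) of how the two new
helpers are meant to combine on the discard path, mirroring the
btrfs_discard_extent() change above. The caller name is hypothetical, and it
assumes it sits next to the existing static btrfs_issue_discard() helper in
extent-tree.c:

/*
 * Illustrative only: release the on-disk space backing one stripe of a
 * deleted block group. A zone-size-aligned range on a sequential write
 * required zone gets its write pointer rewound via a zone reset;
 * anything else falls back to a regular discard when the queue
 * supports it.
 */
static int example_free_stripe(struct btrfs_device *device,
                               u64 physical, u64 length, u64 *bytes)
{
        struct request_queue *q = bdev_get_queue(device->bdev);

        if (btrfs_can_zone_reset(device, physical, length))
                return btrfs_reset_device_zone(device, physical, length,
                                               bytes);

        if (blk_queue_discard(q))
                return btrfs_issue_discard(device->bdev, physical, length,
                                           bytes);

        return -EOPNOTSUPP;
}

Note that btrfs_can_zone_reset() deliberately rejects ranges that are not
aligned to the zone size, so a partial trim of a zone keeps going through the
discard path rather than resetting a zone that may still hold live data.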