@@ -254,16 +254,23 @@ int btrfs_prepare_device(int fd, const char *file, u64 *byte_count_ret,
if (!zinfo->emulated) {
if (opflags & PREP_DEVICE_VERBOSE)
- printf("Resetting device zones %s (%u zones) ...\n",
- file, zinfo->nr_zones);
+ printf("Resetting device zones %s (%llu zones) ...\n",
+ file, byte_count / zinfo->zone_size);
/*
* We cannot ignore zone reset errors for a zoned block
* device as this could result in the inability to write
* to non-empty sequential zones of the device.
*/
- if (btrfs_reset_all_zones(fd, zinfo)) {
- error("zoned: failed to reset device '%s' zones: %m",
- file);
+ ret = btrfs_reset_zones(fd, zinfo, byte_count);
+ if (ret) {
+ if (ret == EBUSY) {
+ error("zoned: device '%s' contains an active zone outside of the FS range",
+ file);
+ error("zoned: btrfs needs full control of active zones");
+ } else {
+ error("zoned: failed to reset device '%s' zones: %m",
+ file);
+ }
goto err;
}
}
@@ -395,16 +395,24 @@ static int report_zones(int fd, const char *file,
* Discard blocks in the zones of a zoned block device. Process this with zone
* size granularity so that blocks in conventional zones are discarded using
* discard_range and blocks in sequential zones are reset though a zone reset.
+ *
+ * We need to ensure that zones outside of the FS are not active, so that
+ * the FS can use all the active zones. Return EBUSY if there is an active
+ * zone.
*/
-int btrfs_reset_all_zones(int fd, struct btrfs_zoned_device_info *zinfo)
+int btrfs_reset_zones(int fd, struct btrfs_zoned_device_info *zinfo, u64 byte_count)
{
unsigned int i;
int ret = 0;
ASSERT(zinfo);
+ ASSERT(IS_ALIGNED(byte_count, zinfo->zone_size));
/* Zone size granularity */
for (i = 0; i < zinfo->nr_zones; i++) {
+ if (byte_count == 0)
+ break;
+
if (zinfo->zones[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
ret = device_discard_blocks(fd,
zinfo->zones[i].start << SECTOR_SHIFT,
@@ -419,7 +427,20 @@ int btrfs_reset_all_zones(int fd, struct btrfs_zoned_device_info *zinfo)
if (ret)
return ret;
+
+ byte_count -= zinfo->zone_size;
}
+ for (; i < zinfo->nr_zones; i++) {
+ const enum blk_zone_cond cond = zinfo->zones[i].cond;
+
+ if (zinfo->zones[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
+ continue;
+ if (cond == BLK_ZONE_COND_IMP_OPEN ||
+ cond == BLK_ZONE_COND_EXP_OPEN ||
+ cond == BLK_ZONE_COND_CLOSED)
+ return EBUSY;
+ }
+
return fsync(fd);
}
@@ -149,7 +149,7 @@ bool btrfs_redirty_extent_buffer_for_zoned(struct btrfs_fs_info *fs_info,
u64 start, u64 end);
int btrfs_reset_chunk_zones(struct btrfs_fs_info *fs_info, u64 devid,
u64 offset, u64 length);
-int btrfs_reset_all_zones(int fd, struct btrfs_zoned_device_info *zinfo);
+int btrfs_reset_zones(int fd, struct btrfs_zoned_device_info *zinfo, u64 byte_count);
int zero_zone_blocks(int fd, struct btrfs_zoned_device_info *zinfo, off_t start,
size_t len);
int btrfs_wipe_temporary_sb(struct btrfs_fs_devices *fs_devices);
@@ -203,8 +203,9 @@ static inline int btrfs_reset_chunk_zones(struct btrfs_fs_info *fs_info,
return 0;
}
-static inline int btrfs_reset_all_zones(int fd,
- struct btrfs_zoned_device_info *zinfo)
+static inline int btrfs_reset_zones(int fd,
+ struct btrfs_zoned_device_info *zinfo,
+ u64 byte_count)
{
return -EOPNOTSUPP;
}