@@ -321,6 +321,9 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
device->fs_devices = fs_info->fs_devices;
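+	/*
+	 * On an emulated zoned filesystem the replace target may itself be
+	 * a regular device; flag it so its zone info is emulated as well.
+	 */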
+ if (btrfs_is_zoned(fs_info) && bdev_zoned_model(bdev) == BLK_ZONED_NONE)
+ device->force_zoned = true;
+
ret = btrfs_get_dev_zone_info(device);
if (ret)
goto error;
@@ -669,6 +669,15 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
device->mode = flags;
+ /* Emulate zoned mode on regular device? */
+ if ((btrfs_super_incompat_flags(disk_super) &
+ BTRFS_FEATURE_INCOMPAT_ZONED) &&
+ bdev_zoned_model(device->bdev) == BLK_ZONED_NONE) {
+ btrfs_info(NULL,
+"zoned: incompat zoned flag detected on regular device, forcing zoned mode emulation");
+ device->force_zoned = true;
+ }
+
fs_devices->open_devices++;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -2562,6 +2571,11 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
device->fs_info = fs_info;
device->bdev = bdev;
+	/* In zoned mode, emulate a zoned device on top of a regular one. */
+ if (btrfs_is_zoned(fs_info) &&
+ bdev_zoned_model(device->bdev) == BLK_ZONED_NONE)
+ device->force_zoned = true;
+
ret = btrfs_get_dev_zone_info(device);
if (ret)
goto error_free_device;
@@ -144,6 +144,9 @@ struct btrfs_device {
struct completion kobj_unregister;
/* For sysfs/FSID/devinfo/devid/ */
struct kobject devid_kobj;
+
+	/* Force zoned mode even on a non-zoned (regular) device */
+ bool force_zoned;
};
/*
@@ -119,6 +119,32 @@ static inline u32 sb_zone_number(int shift, int mirror)
return 0;
}
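+/*
+ * Report emulated zones for a regular (non-zoned) device: carve the device
+ * into fixed-size zones of fs_info->zone_size and report them all as
+ * conventional zones with no write pointer to manage.
+ *
+ * Illustration (example numbers, not mandated by the patch): with an
+ * emulated zone size of 256MiB, zone_sectors is 524288, so a report at
+ * pos == 0 fills zones [0, 524288), [524288, 1048576), ... each with
+ * wp == start + len and BLK_ZONE_COND_NOT_WP.
+ */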
+static int emulate_report_zones(struct btrfs_device *device, u64 pos,
+ struct blk_zone *zones, unsigned int nr_zones)
+{
+ const sector_t zone_sectors =
+ device->fs_info->zone_size >> SECTOR_SHIFT;
+ sector_t bdev_size = device->bdev->bd_part->nr_sects;
+ unsigned int i;
+
+ pos >>= SECTOR_SHIFT;
+ for (i = 0; i < nr_zones; i++) {
+ zones[i].start = i * zone_sectors + pos;
+ zones[i].len = zone_sectors;
+ zones[i].capacity = zone_sectors;
+ zones[i].wp = zones[i].start + zone_sectors;
+ zones[i].type = BLK_ZONE_TYPE_CONVENTIONAL;
+ zones[i].cond = BLK_ZONE_COND_NOT_WP;
+
+ if (zones[i].wp >= bdev_size) {
+ i++;
+ break;
+ }
+ }
+
+ return i;
+}
+
static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
struct blk_zone *zones, unsigned int *nr_zones)
{
@@ -127,6 +153,12 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
if (!*nr_zones)
return 0;
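+	/*
+	 * A regular device running in emulated zoned mode has no hardware
+	 * zone report; synthesize conventional zones instead.
+	 */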
+ if (device->force_zoned) {
+ ret = emulate_report_zones(device, pos, zones, *nr_zones);
+ *nr_zones = ret;
+ return 0;
+ }
+
ret = blkdev_report_zones(device->bdev, pos >> SECTOR_SHIFT, *nr_zones,
copy_zone_info_cb, zones);
if (ret < 0) {
@@ -143,6 +175,49 @@ static int btrfs_get_dev_zones(struct btrfs_device *device, u64 pos,
return 0;
}
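+/*
+ * Derive the emulated zone size from the size of the first on-disk dev
+ * extent (devid 1, offset 0): in zoned mode every dev extent spans exactly
+ * one zone, so the length of any dev extent equals the zone size a regular
+ * device has to emulate.
+ */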
+static int calculate_emulated_zone_size(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_path *path;
+ struct btrfs_root *root = fs_info->dev_root;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct btrfs_dev_extent *dext;
+ int ret = 0;
+
+ key.objectid = 1;
+ key.type = BTRFS_DEV_EXTENT_KEY;
+ key.offset = 0;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+		ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out;
+ /* No dev extents at all? Not good */
+ if (ret > 0) {
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+
+ leaf = path->nodes[0];
+ dext = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent);
+ fs_info->zone_size = btrfs_dev_extent_length(leaf, dext);
+ ret = 0;
+
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
+
int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
{
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
@@ -158,6 +233,12 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
if (!device->bdev)
continue;
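+		/* Compute the emulated zone size once, from the dev extents */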
+ if (device->force_zoned && !fs_info->zone_size) {
+ ret = calculate_emulated_zone_size(fs_info);
+ if (ret)
+ break;
+ }
+
ret = btrfs_get_dev_zone_info(device);
if (ret)
break;
@@ -177,9 +258,11 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
struct blk_zone *zones = NULL;
unsigned int i, nreported = 0, nr_zones;
unsigned int zone_sectors;
+ const bool force_zoned = device->force_zoned;
+	const char *model, *emulated;
int ret;
- if (!bdev_is_zoned(bdev))
+ if (!bdev_is_zoned(bdev) && !force_zoned)
return 0;
if (device->zone_info)
@@ -189,8 +272,12 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
if (!zone_info)
return -ENOMEM;
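+	/*
+	 * In emulated mode the zone size comes from the filesystem, not
+	 * from the block device.
+	 */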
+ if (force_zoned)
+ zone_sectors = device->fs_info->zone_size >> SECTOR_SHIFT;
+ else
+ zone_sectors = bdev_zone_sectors(bdev);
+
nr_sectors = bdev->bd_part->nr_sects;
- zone_sectors = bdev_zone_sectors(bdev);
/* Check if it's power of 2 (see is_power_of_2) */
ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
zone_info->zone_size = zone_sectors << SECTOR_SHIFT;
@@ -296,12 +383,22 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
device->zone_info = zone_info;
- /* device->fs_info is not safe to use for printing messages */
- btrfs_info_in_rcu(NULL,
- "host-%s zoned block device %s, %u zones of %llu bytes",
- bdev_zoned_model(bdev) == BLK_ZONED_HM ? "managed" : "aware",
- rcu_str_deref(device->name), zone_info->nr_zones,
- zone_info->zone_size);
+	if (bdev_zoned_model(bdev) == BLK_ZONED_HM) {
+		model = "host-managed zoned";
+		emulated = "";
+	} else if (bdev_zoned_model(bdev) == BLK_ZONED_HA) {
+		model = "host-aware zoned";
+		emulated = "";
+	} else {
+		/*
+		 * The early return above already rejected a regular device
+		 * without force_zoned, so emulation is the only case left.
+		 */
+		model = "regular";
+		emulated = "emulated ";
+	}
+
+ btrfs_info_in_rcu(device->fs_info,
+ "%s block device %s, %u %szones of %llu bytes",
+ model, rcu_str_deref(device->name), zone_info->nr_zones,
+ emulated, zone_info->zone_size);
return 0;
@@ -348,7 +445,7 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
u64 nr_devices = 0;
u64 zone_size = 0;
u64 max_zone_append_size = 0;
- const bool incompat_zoned = btrfs_is_zoned(fs_info);
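+	/*
+	 * btrfs_is_zoned() tests fs_info->zone_size, which may not be set
+	 * yet at this point, so check the incompat flag directly.
+	 */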
+ const bool incompat_zoned = btrfs_fs_incompat(fs_info, ZONED);
int ret = 0;
/* Count zoned devices */
@@ -360,8 +457,10 @@ int btrfs_check_zoned_mode(struct btrfs_fs_info *fs_info)
model = bdev_zoned_model(device->bdev);
if (model == BLK_ZONED_HM ||
- (model == BLK_ZONED_HA && incompat_zoned)) {
- struct btrfs_zoned_device_info *zone_info;
+ (model == BLK_ZONED_HA && incompat_zoned) ||
+ device->force_zoned) {
+ struct btrfs_zoned_device_info *zone_info =
+ device->zone_info;
-			zone_info = device->zone_info;
zoned_devices++;
@@ -143,12 +143,16 @@ static inline void btrfs_dev_clear_zone_empty(struct btrfs_device *device, u64 p
static inline bool btrfs_check_device_zone_type(const struct btrfs_fs_info *fs_info,
struct block_device *bdev)
{
- u64 zone_size;
-
if (btrfs_is_zoned(fs_info)) {
- zone_size = bdev_zone_sectors(bdev) << SECTOR_SHIFT;
- /* Do not allow non-zoned device */
- return bdev_is_zoned(bdev) && fs_info->zone_size == zone_size;
+		/*
+		 * We can allow a regular device on a zoned filesystem,
+		 * because we will emulate the zoned capabilities on it.
+		 */
+ if (!bdev_is_zoned(bdev))
+ return true;
+
+ return fs_info->zone_size ==
+ (bdev_zone_sectors(bdev) << SECTOR_SHIFT);
}
	/* Do not allow Host Managed zoned device */