
[05/12] btrfs-progs: zoned: activate block group on loading

Message ID 0a272ad61dca26d4ee7f1e7ba474685b21825a05.1739756953.git.naohiro.aota@wdc.com (mailing list archive)
State New
Series btrfs-progs: zoned: support zone capacity and

Commit Message

Naohiro Aota Feb. 17, 2025, 2:37 a.m. UTC
Introduce "zone_is_active" member to struct btrfs_block_group and activate it
on loading a block group.

Note that the activeness check for extent allocation is not implemented yet.
Checking activeness would require activating a non-active block group at
extent allocation time, which in turn would require finishing a zone when the
active zone limit is hit. Since mkfs should not hit that limit, implementing
the zone finishing code is unnecessary at the moment.
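
For illustration only, a rough sketch of what such an activeness check at
extent allocation could look like, loosely modeled on the kernel's
btrfs_zone_activate()/btrfs_zone_finish(); the helper name
activate_zoned_block_group() below is hypothetical and not part of this
series:

static int ensure_block_group_active(struct btrfs_block_group *cache)
{
	if (cache->zone_is_active)
		return 0;

	/* Hypothetical helper: try to take one active zone slot. */
	if (activate_zoned_block_group(cache) == 0) {
		cache->zone_is_active = true;
		return 0;
	}

	/*
	 * Out of active zone slots. A full implementation would pick an
	 * almost-full block group and finish its zone here; mkfs should
	 * never reach this point, so it is left unimplemented.
	 */
	return -ENOSPC;
}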

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 kernel-shared/ctree.h |  1 +
 kernel-shared/zoned.c | 15 +++++++++++++++
 2 files changed, 16 insertions(+)

Patch

diff --git a/kernel-shared/ctree.h b/kernel-shared/ctree.h
index f10142df80eb..da0635d567dc 100644
--- a/kernel-shared/ctree.h
+++ b/kernel-shared/ctree.h
@@ -286,6 +286,7 @@  struct btrfs_block_group {
 	u64 alloc_offset;
 	u64 write_offset;
 	u64 zone_capacity;
+	bool zone_is_active;
 
 	u64 global_root_id;
 };
diff --git a/kernel-shared/zoned.c b/kernel-shared/zoned.c
index a97466635ecb..ee6c4ee61e4a 100644
--- a/kernel-shared/zoned.c
+++ b/kernel-shared/zoned.c
@@ -901,6 +901,7 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 	u64 logical = cache->start;
 	u64 length = cache->length;
 	struct zone_info *zone_info = NULL;
+	unsigned long *active = NULL;
 	int ret = 0;
 	int i;
 	u64 last_alloc = 0;
@@ -935,6 +936,13 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 		return -ENOMEM;
 	}
 
+	active = bitmap_zalloc(map->num_stripes);
+	if (!active) {
+		free(zone_info);
+		error_msg(ERROR_MSG_MEMORY, "active bitmap");
+		return -ENOMEM;
+	}
+
 	for (i = 0; i < map->num_stripes; i++) {
 		struct zone_info *info = &zone_info[i];
 		bool is_sequential;
@@ -948,6 +956,10 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 			continue;
 		}
 
+	/* Consider a zone as active if the device has no active zone limit. */
+		if (!device->zone_info->max_active_zones)
+			set_bit(i, active);
+
 		is_sequential = btrfs_dev_is_sequential(device, info->physical);
 		if (!is_sequential) {
 			num_conventional++;
@@ -983,6 +995,7 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 		default:
 			/* Partially used zone */
 			info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
+			set_bit(i, active);
 			break;
 		}
 	}
@@ -1008,8 +1021,10 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 		ret = -EINVAL;
 		goto out;
 	}
+	/* SINGLE profile case. */
 	cache->alloc_offset = zone_info[0].alloc_offset;
 	cache->zone_capacity = zone_info[0].capacity;
+	cache->zone_is_active = test_bit(0, active);
 
 out:
 	/* An extent is allocated after the write pointer */
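
Note that the last hunk wires up only the SINGLE profile. As a hedged sketch
of where this could go, a DUP profile might consume the same bitmap roughly
the way the kernel's btrfs_load_block_group_zone_info() does; the fragment
below is hypothetical and not part of this patch:

	if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		/*
		 * Hypothetical DUP handling: both stripes must agree on
		 * activeness, or the block group state would be
		 * inconsistent across the two copies.
		 */
		if (test_bit(0, active) != test_bit(1, active)) {
			error("zoned: inconsistent active state across DUP stripes");
			ret = -EIO;
			goto out;
		}
		cache->zone_is_active = test_bit(0, active);
	}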