[03/12] btrfs-progs: zoned: support zone capacity

Message ID 8c68704eca02321ab9577d4a1e96eb83751fbc35.1739756953.git.naohiro.aota@wdc.com
State New
Series btrfs-progs: zoned: support zone capacity and

Commit Message

Naohiro Aota Feb. 17, 2025, 2:37 a.m. UTC
The userland tools did not load or use the zone capacity. Support it properly: record the capacity of each zone and check allocations against it instead of the zone size.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 kernel-shared/ctree.h       | 1 +
 kernel-shared/extent-tree.c | 2 +-
 kernel-shared/zoned.c       | 9 ++++++++-
 3 files changed, 10 insertions(+), 2 deletions(-)
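
For context on why the distinction matters: on zoned block devices, each zone reports both a size (the LBA range it spans) and a capacity (the writable portion), and on some devices, notably NVMe ZNS SSDs, the capacity is smaller than the size. Treating the two as equal lets the allocator hand out space past the writable region. Below is a minimal standalone sketch of how the capacity shows up in a kernel zone report; it is plain BLKREPORTZONE usage from <linux/blkzoned.h>, illustrative only and not btrfs-progs code:

/*
 * Sketch: print zone size vs. zone capacity for the first zone of a
 * zoned block device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	struct {
		struct blk_zone_report rep;
		struct blk_zone zone;	/* room for one reported zone */
	} buf;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <zoned-blockdev>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&buf, 0, sizeof(buf));
	buf.rep.sector = 0;	/* start reporting at the first zone */
	buf.rep.nr_zones = 1;	/* one zone is enough for this demo */
	if (ioctl(fd, BLKREPORTZONE, &buf.rep) < 0) {
		perror("BLKREPORTZONE");
		close(fd);
		return 1;
	}
	/*
	 * Kernels before capacity reporting (pre-5.9) do not set
	 * BLK_ZONE_REP_CAPACITY; fall back to capacity == size then.
	 */
	if (!(buf.rep.flags & BLK_ZONE_REP_CAPACITY))
		buf.zone.capacity = buf.zone.len;
	/*
	 * Both fields are in 512-byte sectors, hence the << SECTOR_SHIFT
	 * conversion seen in the zoned.c hunk of this patch.
	 */
	printf("zone size:     %llu bytes\n",
	       (unsigned long long)buf.zone.len << 9);
	printf("zone capacity: %llu bytes\n",
	       (unsigned long long)buf.zone.capacity << 9);
	close(fd);
	return 0;
}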

Patch

diff --git a/kernel-shared/ctree.h b/kernel-shared/ctree.h
index 8c923be96705..a6aa10a690bb 100644
--- a/kernel-shared/ctree.h
+++ b/kernel-shared/ctree.h
@@ -285,6 +285,7 @@  struct btrfs_block_group {
 	 */
 	u64 alloc_offset;
 	u64 write_offset;
+	u64 zone_capacity;
 
 	u64 global_root_id;
 };
diff --git a/kernel-shared/extent-tree.c b/kernel-shared/extent-tree.c
index 20eef4f3df7b..2b7a962f294b 100644
--- a/kernel-shared/extent-tree.c
+++ b/kernel-shared/extent-tree.c
@@ -300,7 +300,7 @@  again:
 		goto new_group;
 
 	if (btrfs_is_zoned(root->fs_info)) {
-		if (cache->length - cache->alloc_offset < num)
+		if (cache->zone_capacity - cache->alloc_offset < num)
 			goto new_group;
 		*start_ret = cache->start + cache->alloc_offset;
 		cache->alloc_offset += num;
diff --git a/kernel-shared/zoned.c b/kernel-shared/zoned.c
index b06774482cfd..319ee88d5b06 100644
--- a/kernel-shared/zoned.c
+++ b/kernel-shared/zoned.c
@@ -776,7 +776,7 @@  static int calculate_alloc_pointer(struct btrfs_fs_info *fs_info,
 		length = fs_info->nodesize;
 
 	if (!(found_key.objectid >= cache->start &&
-	       found_key.objectid + length <= cache->start + cache->length)) {
+	       found_key.objectid + length <= cache->start + cache->zone_capacity)) {
 		ret = -EUCLEAN;
 		goto out;
 	}
@@ -830,6 +830,7 @@  bool zoned_profile_supported(u64 map_type, bool rst)
 
 struct zone_info {
 	u64 physical;
+	u64 capacity;
 	u64 alloc_offset;
 };
 
@@ -894,6 +895,7 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 		if (!is_sequential) {
 			num_conventional++;
 			info->alloc_offset = WP_CONVENTIONAL;
+			info->capacity = device->zone_info->zone_size;
 			continue;
 		}
 
@@ -904,6 +906,8 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 		WARN_ON(!IS_ALIGNED(info->physical, fs_info->zone_size));
 		zone = device->zone_info->zones[info->physical / fs_info->zone_size];
 
+		info->capacity = (zone.capacity << SECTOR_SHIFT);
+
 		switch (zone.cond) {
 		case BLK_ZONE_COND_OFFLINE:
 		case BLK_ZONE_COND_READONLY:
@@ -927,6 +931,8 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 	}
 
 	if (num_conventional > 0) {
+		/* Zone capacity is always zone size in emulation */
+		cache->zone_capacity = cache->length;
 		ret = calculate_alloc_pointer(fs_info, cache, &last_alloc);
 		if (ret || map->num_stripes == num_conventional) {
 			if (!ret)
@@ -946,6 +952,7 @@  int btrfs_load_block_group_zone_info(struct btrfs_fs_info *fs_info,
 		goto out;
 	}
 	cache->alloc_offset = zone_info[0].alloc_offset;
+	cache->zone_capacity = zone_info[0].capacity;
 
 out:
 	/* An extent is allocated after the write pointer */
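
For readers who want the extent-tree.c change in isolation, here is a minimal sketch of the capacity-aware free-space check; the struct and function names are illustrative, not the actual btrfs-progs definitions:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the relevant btrfs_block_group fields. */
struct bg {
	uint64_t start;		/* logical start of the block group */
	uint64_t length;	/* block group length (== zone size) */
	uint64_t zone_capacity;	/* writable bytes, <= length */
	uint64_t alloc_offset;	/* bytes already allocated in the zone */
};

/*
 * Before the patch the free-space check used bg->length, which
 * overstates the usable room whenever zone capacity < zone size,
 * so an allocation could be placed past the writable region.
 */
static bool can_allocate(const struct bg *bg, uint64_t num)
{
	return bg->zone_capacity - bg->alloc_offset >= num;
}

As a concrete example: with a 256 MiB zone whose capacity is 192 MiB and alloc_offset at 180 MiB, the old length-based check would accept a 64 MiB allocation (256 - 180 = 76 MiB apparently free), while the capacity-based check correctly rejects anything over the 12 MiB that is actually writable.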