--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -144,6 +144,7 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
*/
WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
kfree(cache->free_space_ctl);
+ kfree(cache->physical_map);
kfree(cache);
}
}
--- a/fs/btrfs/block-group.h
+++ b/fs/btrfs/block-group.h
@@ -204,6 +204,7 @@ struct btrfs_block_group {
u64 zone_unusable;
u64 zone_capacity;
u64 meta_write_pointer;
+ struct map_lookup *physical_map;
};

static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group)
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -1158,10 +1158,19 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
map = em->map_lookup;

+ cache->physical_map = kmalloc(map_lookup_size(map->num_stripes),
+ GFP_NOFS);
+ if (!cache->physical_map) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(cache->physical_map, map, map_lookup_size(map->num_stripes));
+
alloc_offsets = kcalloc(map->num_stripes, sizeof(*alloc_offsets), GFP_NOFS);
if (!alloc_offsets) {
- free_extent_map(em);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}

caps = kcalloc(map->num_stripes, sizeof(*caps), GFP_NOFS);
@@ -1344,6 +1353,10 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
if (!ret)
cache->meta_write_pointer = cache->alloc_offset + cache->start;
+ if (ret) {
+ kfree(cache->physical_map);
+ cache->physical_map = NULL;
+ }
kfree(caps);
kfree(alloc_offsets);
free_extent_map(em);
We will use a block group's physical location to track active zones and
finish fully written zones in the following commits. Since zone
activation is done in the extent allocation context, which already holds
the tree locks, we can't query the chunk tree for the physical
locations. So, copy the location info into the block group and use it
for activation.

Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
---
 fs/btrfs/block-group.c |  1 +
 fs/btrfs/block-group.h |  1 +
 fs/btrfs/zoned.c       | 17 +++++++++++++++--
 3 files changed, 17 insertions(+), 2 deletions(-)
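
For context on how the cached map is meant to be consumed, here is a
rough sketch (not code from this series; the function name and the
btrfs_dev_set_active_zone() helper are assumptions standing in for the
per-device bookkeeping a later patch would provide) of an activation
path walking physical_map instead of querying the chunk tree:

/*
 * Illustrative sketch only -- not code from this series. It shows how an
 * activation helper can reach each stripe's device and physical offset
 * through the cached physical_map, without a chunk tree lookup. The
 * function name and btrfs_dev_set_active_zone() are assumptions here.
 */
static bool sketch_zone_activate(struct btrfs_block_group *block_group)
{
	struct map_lookup *map = block_group->physical_map;
	int i;

	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		u64 physical = map->stripes[i].physical;

		/* Regular (non-zoned) devices need no activation. */
		if (!device->zone_info)
			continue;

		/* Assumed per-device zone bookkeeping helper. */
		if (!btrfs_dev_set_active_zone(device, physical))
			return false;
	}

	return true;
}

Since physical_map is a private copy owned by the block group (freed in
btrfs_put_block_group() above), such a walk needs no chunk tree locking
and is safe from the extent allocation context.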