diff mbox series

[v12,05/41] btrfs: release path before calling into btrfs_load_block_group_zone_info

Message ID 0786a9782ec6306cddb0a2808116c3f95a88849b.1610693037.git.naohiro.aota@wdc.com (mailing list archive)
State New, archived
Series btrfs: zoned block device support

Commit Message

Naohiro Aota Jan. 15, 2021, 6:53 a.m. UTC
From: Johannes Thumshirn <johannes.thumshirn@wdc.com>

Since we have no write pointer in conventional zones, we cannot determine
the allocation offset from it. Instead, we set the allocation offset after
the highest addressed extent. This is done by reading the extent tree in
btrfs_load_block_group_zone_info().

However, this function is called from btrfs_read_block_groups(), so the
read lock for the tree node can be taken recursively.

To avoid this unsafe locking scenario, release the path before reading the
extent tree to get the allocation offset.

Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
---
 fs/btrfs/block-group.c | 39 ++++++++++++++++++---------------------
 1 file changed, 18 insertions(+), 21 deletions(-)
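
For readers unfamiliar with the code path, the recursion being avoided comes
from the pre-patch loop in btrfs_read_block_groups(), which still holds its
search path into the extent tree when it descends into the zone-info code.
The fragment below is a hedged sketch built from the lines this patch removes,
with the call nesting spelled out in comments; it is not a verbatim trace of
the kernel:

	/*
	 * Pre-patch code in btrfs_read_block_groups(): 'path' still points
	 * into the extent tree at this point.
	 */
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ret = read_one_block_group(info, path, &key, need_clear);
	/*
	 * -> btrfs_load_block_group_zone_info()
	 *    -> for conventional zones, searches the extent tree for the
	 *       highest addressed extent, re-taking the read lock on a tree
	 *       node the outer search already holds.
	 */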

Comments

Josef Bacik Jan. 15, 2021, 10:22 p.m. UTC | #1
On 1/15/21 1:53 AM, Naohiro Aota wrote:
> From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> 
> Since we have no write pointer in conventional zones, we cannot determine
> the allocation offset from it. Instead, we set the allocation offset after
> the highest addressed extent. This is done by reading the extent tree in
> btrfs_load_block_group_zone_info().
> 
> However, this function is called from btrfs_read_block_groups(), so the
> read lock for the tree node can be taken recursively.
> 
> To avoid this unsafe locking scenario, release the path before reading the
> extent tree to get the allocation offset.
> 
> Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
> ---
>   fs/btrfs/block-group.c | 39 ++++++++++++++++++---------------------
>   1 file changed, 18 insertions(+), 21 deletions(-)
> 
> diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
> index b8bbdd95743e..ff13f7554ee5 100644
> --- a/fs/btrfs/block-group.c
> +++ b/fs/btrfs/block-group.c
> @@ -1806,24 +1806,8 @@ static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
>   	return ret;
>   }
>   
> -static void read_block_group_item(struct btrfs_block_group *cache,
> -				 struct btrfs_path *path,
> -				 const struct btrfs_key *key)
> -{
> -	struct extent_buffer *leaf = path->nodes[0];
> -	struct btrfs_block_group_item bgi;
> -	int slot = path->slots[0];
> -
> -	cache->length = key->offset;
> -
> -	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
> -			   sizeof(bgi));
> -	cache->used = btrfs_stack_block_group_used(&bgi);
> -	cache->flags = btrfs_stack_block_group_flags(&bgi);
> -}
> -
>   static int read_one_block_group(struct btrfs_fs_info *info,
> -				struct btrfs_path *path,
> +				struct btrfs_block_group_item *bgi,
>   				const struct btrfs_key *key,
>   				int need_clear)
>   {
> @@ -1838,7 +1822,9 @@ static int read_one_block_group(struct btrfs_fs_info *info,
>   	if (!cache)
>   		return -ENOMEM;
>   
> -	read_block_group_item(cache, path, key);
> +	cache->length = key->offset;
> +	cache->used = btrfs_stack_block_group_used(bgi);
> +	cache->flags = btrfs_stack_block_group_flags(bgi);
>   
>   	set_free_space_tree_thresholds(cache);
>   
> @@ -1997,19 +1983,30 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
>   		need_clear = 1;
>   
>   	while (1) {
> +		struct btrfs_block_group_item bgi;
> +		struct extent_buffer *leaf;
> +		int slot;
> +
>   		ret = find_first_block_group(info, path, &key);
>   		if (ret > 0)
>   			break;
>   		if (ret != 0)
>   			goto error;
>   
> -		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
> -		ret = read_one_block_group(info, path, &key, need_clear);
> +		leaf = path->nodes[0];
> +		slot = path->slots[0];
> +		btrfs_release_path(path);

You're releasing the path and then reading from it, a potential UAF.  Thanks,

Josef
Johannes Thumshirn Jan. 18, 2021, 8:55 a.m. UTC | #2
On 15/01/2021 23:22, Josef Bacik wrote:
> You're releasing the path and then reading from it, a potential UAF.  Thanks,

*@#%! I'm stupid, fixed for v13.
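
For reference, the ordering that avoids both the recursive read lock and the
use-after-free Josef points out is to copy everything needed off the leaf
before dropping the path. A minimal sketch of that ordering, using only the
helpers visible in the patch below (the actual v13 code is not shown here):

		leaf = path->nodes[0];
		slot = path->slots[0];

		/*
		 * Copy the block group item and key to the stack while the
		 * leaf is still pinned and locked by the path.
		 */
		read_extent_buffer(leaf, &bgi,
				   btrfs_item_ptr_offset(leaf, slot),
				   sizeof(bgi));
		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * Only now drop the path, before read_one_block_group() ends
		 * up in btrfs_load_block_group_zone_info() and searches the
		 * extent tree again.
		 */
		btrfs_release_path(path);

		ret = read_one_block_group(info, &bgi, &key, need_clear);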

Patch

diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index b8bbdd95743e..ff13f7554ee5 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1806,24 +1806,8 @@  static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
 	return ret;
 }
 
-static void read_block_group_item(struct btrfs_block_group *cache,
-				 struct btrfs_path *path,
-				 const struct btrfs_key *key)
-{
-	struct extent_buffer *leaf = path->nodes[0];
-	struct btrfs_block_group_item bgi;
-	int slot = path->slots[0];
-
-	cache->length = key->offset;
-
-	read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
-			   sizeof(bgi));
-	cache->used = btrfs_stack_block_group_used(&bgi);
-	cache->flags = btrfs_stack_block_group_flags(&bgi);
-}
-
 static int read_one_block_group(struct btrfs_fs_info *info,
-				struct btrfs_path *path,
+				struct btrfs_block_group_item *bgi,
 				const struct btrfs_key *key,
 				int need_clear)
 {
@@ -1838,7 +1822,9 @@  static int read_one_block_group(struct btrfs_fs_info *info,
 	if (!cache)
 		return -ENOMEM;
 
-	read_block_group_item(cache, path, key);
+	cache->length = key->offset;
+	cache->used = btrfs_stack_block_group_used(bgi);
+	cache->flags = btrfs_stack_block_group_flags(bgi);
 
 	set_free_space_tree_thresholds(cache);
 
@@ -1997,19 +1983,30 @@  int btrfs_read_block_groups(struct btrfs_fs_info *info)
 		need_clear = 1;
 
 	while (1) {
+		struct btrfs_block_group_item bgi;
+		struct extent_buffer *leaf;
+		int slot;
+
 		ret = find_first_block_group(info, path, &key);
 		if (ret > 0)
 			break;
 		if (ret != 0)
 			goto error;
 
-		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
-		ret = read_one_block_group(info, path, &key, need_clear);
+		leaf = path->nodes[0];
+		slot = path->slots[0];
+		btrfs_release_path(path);
+
+		read_extent_buffer(leaf, &bgi,
+				   btrfs_item_ptr_offset(leaf, slot),
+				   sizeof(bgi));
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		ret = read_one_block_group(info, &bgi, &key, need_clear);
 		if (ret < 0)
 			goto error;
 		key.objectid += key.offset;
 		key.offset = 0;
-		btrfs_release_path(path);
 	}
 	btrfs_release_path(path);