--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -892,8 +892,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
struct btrfs_block_group *block_group;
struct btrfs_free_cluster *cluster;
- struct btrfs_root *tree_root = fs_info->tree_root;
- struct btrfs_key key;
struct inode *inode;
struct kobject *kobj = NULL;
int ret;
@@ -971,42 +969,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&trans->transaction->dirty_bgs_lock);
mutex_unlock(&trans->transaction->cache_write_mutex);
- if (!IS_ERR(inode)) {
- ret = btrfs_orphan_add(trans, BTRFS_I(inode));
- if (ret) {
- btrfs_add_delayed_iput(inode);
- goto out;
- }
- clear_nlink(inode);
- /* One for the block groups ref */
- spin_lock(&block_group->lock);
- if (block_group->iref) {
- block_group->iref = 0;
- block_group->inode = NULL;
- spin_unlock(&block_group->lock);
- iput(inode);
- } else {
- spin_unlock(&block_group->lock);
- }
- /* One for our lookup ref */
- btrfs_add_delayed_iput(inode);
- }
-
- key.objectid = BTRFS_FREE_SPACE_OBJECTID;
- key.type = 0;
- key.offset = block_group->start;
-
- ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
- if (ret < 0)
+ ret = remove_free_space_inode(trans, block_group);
+ if (ret)
goto out;
- if (ret > 0)
- btrfs_release_path(path);
- if (ret == 0) {
- ret = btrfs_del_item(trans, tree_root, path);
- if (ret)
- goto out;
- btrfs_release_path(path);
- }
spin_lock(&fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
@@ -2343,6 +2308,9 @@ static int cache_save_setup(struct btrfs_block_group *block_group,
int retries = 0;
int ret = 0;
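+ /*
+ * The inode-backed free space cache is only used by space cache v1;
+ * with nospace_cache or the free space tree (v2) there is nothing to
+ * set up here.
+ */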
+ if (!btrfs_test_opt(fs_info, SPACE_CACHE))
+ return 0;
+
/*
* If this block group is smaller than 100 megs don't bother caching the
* block group.
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -207,6 +207,54 @@ int create_free_space_inode(struct btrfs_trans_handle *trans,
ino, block_group->start);
}
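+/*
+ * Drop the free space cache inode of @block_group and delete the FREE_SPACE
+ * item that points to it from the root tree.
+ */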
+int remove_free_space_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group)
+{
+ struct btrfs_path *path;
+ struct inode *inode;
+ struct btrfs_key key;
+ int ret = 0;
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ inode = lookup_free_space_inode(block_group, path);
+ if (IS_ERR(inode)) {
+ if (PTR_ERR(inode) != -ENOENT)
+ ret = PTR_ERR(inode);
+ goto out;
+ }
+ ret = btrfs_orphan_add(trans, BTRFS_I(inode));
+ if (ret) {
+ btrfs_add_delayed_iput(inode);
+ goto out;
+ }
+ clear_nlink(inode);
+ /* One for the block groups ref */
+ spin_lock(&block_group->lock);
+ if (block_group->iref) {
+ block_group->iref = 0;
+ block_group->inode = NULL;
+ spin_unlock(&block_group->lock);
+ iput(inode);
+ } else {
+ spin_unlock(&block_group->lock);
+ }
+ /* One for our lookup ref */
+ btrfs_add_delayed_iput(inode);
+
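+ /*
+ * Delete the FREE_SPACE header item that records this block group's
+ * cache inode location and generation.
+ */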
+ key.objectid = BTRFS_FREE_SPACE_OBJECTID;
+ key.type = 0;
+ key.offset = block_group->start;
+ ret = btrfs_search_slot(trans, trans->fs_info->tree_root, &key, path, -1, 1);
+ if (ret) {
+ if (ret > 0)
+ ret = 0;
+ goto out;
+ }
+ ret = btrfs_del_item(trans, trans->fs_info->tree_root, path);
+out:
+ btrfs_free_path(path);
+ return ret;
+}
+
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv)
{
@@ -2806,7 +2854,6 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group)
__btrfs_remove_free_space_cache_locked(ctl);
btrfs_discard_update_discardable(block_group, ctl);
spin_unlock(&ctl->tree_lock);
-
}
/**
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -84,6 +84,8 @@ struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
int create_free_space_inode(struct btrfs_trans_handle *trans,
struct btrfs_block_group *block_group,
struct btrfs_path *path);
+int remove_free_space_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_block_group *block_group);
int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *rsv);
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -1165,6 +1165,9 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
block_group = rb_entry(node, struct btrfs_block_group,
cache_node);
ret = populate_free_space_tree(trans, block_group);
+ if (ret)
+ goto abort;
+ ret = remove_free_space_inode(trans, block_group);
if (ret)
goto abort;
node = rb_next(node);
When the file system transitions from space cache v1 to v2 it removes the old cached data, but does not remove the FREE_SPACE items nor the free space inodes they point to. This doesn't cause any issues besides being a bit inefficient, since these items no longer do anything useful.

To fix it, as part of populating the free space tree, destroy each block group's free space item and free space inode. This code is lifted from the existing code for removing them when removing the block group.

Furthermore, cache_save_setup is called unconditionally from transaction commit on dirty block groups, so we must also stop creating these items when we are not using SPACE_CACHE.

References: https://github.com/btrfs/btrfs-todo/issues/5
Signed-off-by: Boris Burkov <boris@bur.io>
---
 fs/btrfs/block-group.c      | 42 ++++++---------------------
 fs/btrfs/free-space-cache.c | 49 ++++++++++++++++++++++++++++++++++++-
 fs/btrfs/free-space-cache.h |  2 ++
 fs/btrfs/free-space-tree.c  |  3 +++
 4 files changed, 58 insertions(+), 38 deletions(-)
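For reviewers who want the resulting control flow in one place, below is a condensed sketch of the block group loop in btrfs_create_free_space_tree() with this patch applied. The helper name is made up for illustration only, error handling is simplified to early returns, and the transaction/locking setup around the loop is omitted; the traversal mirrors the existing loop shown in the free-space-tree.c hunk above.

	/*
	 * Reviewer aid (not part of the patch): condensed flow of the
	 * block group loop in btrfs_create_free_space_tree() after this
	 * change. Transaction setup, aborts and locking are omitted.
	 */
	static int convert_block_groups_sketch(struct btrfs_trans_handle *trans,
					       struct btrfs_fs_info *fs_info)
	{
		struct btrfs_block_group *block_group;
		struct rb_node *node;
		int ret;

		for (node = rb_first(&fs_info->block_group_cache_tree); node;
		     node = rb_next(node)) {
			block_group = rb_entry(node, struct btrfs_block_group,
					       cache_node);

			/* Build this block group's entries in the free space tree (v2). */
			ret = populate_free_space_tree(trans, block_group);
			if (ret)
				return ret;

			/*
			 * New in this patch: drop the v1 cache inode and its
			 * FREE_SPACE item so no stale metadata is left behind.
			 */
			ret = remove_free_space_inode(trans, block_group);
			if (ret)
				return ret;
		}
		return 0;
	}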