@@ -1470,6 +1470,14 @@ struct btrfs_fs_info {
 	int backup_root_index;
 	int num_tolerated_disk_barrier_failures;
+
+	/*
+	 * Protect against recursive do_chunk_alloc(): the task that
+	 * currently holds chunk_mutex for a chunk allocation, plus the
+	 * space_info the outer call is allocating for.
+	 */
+	const void *chunk_alloc_task;
+	const struct btrfs_space_info *prev_sinfo;
 };
 /*
@@ -2087,6 +2087,9 @@ int open_ctree(struct super_block *sb,
 	fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
 	set_nlink(fs_info->btree_inode, 1);
+
+	fs_info->chunk_alloc_task = NULL;
+	fs_info->prev_sinfo = NULL;
 	/*
 	 * we set the i_size on the btree inode to the max possible int.
 	 * the real end of the address space is determined by all of
@@ -3582,6 +3582,23 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 	}
 	BUG_ON(!space_info); /* Logic error */
+
+	/*
+	 * If this task already holds chunk_mutex for a chunk allocation,
+	 * taking the mutex again below would deadlock.  The unlocked read
+	 * is safe here: chunk_alloc_task can only compare equal to
+	 * "current" if current itself set it while holding chunk_mutex.
+	 */
+	if (mutex_is_locked(&fs_info->chunk_mutex) &&
+	    unlikely(fs_info->chunk_alloc_task == current)) {
+		WARN_ONCE(1, "do_chunk_alloc() about to recursively acquire "
+			  "fs_info->chunk_mutex: impending deadlock avoided!\n"
+			  "outer call space_info = %p flags %#llx\n"
+			  "nested call space_info = %p flags %#llx\n",
+			  fs_info->prev_sinfo, fs_info->prev_sinfo->flags,
+			  space_info, space_info->flags);
+		return -EDEADLK;
+	}
 again:
 	spin_lock(&space_info->lock);
 	if (force < space_info->force_alloc)
@@ -3603,13 +3620,21 @@ again:
 	spin_unlock(&space_info->lock);
 	mutex_lock(&fs_info->chunk_mutex);
 	/*
 	 * The chunk_mutex is held throughout the entirety of a chunk
 	 * allocation, so once we've acquired the chunk_mutex we know that the
 	 * other guy is done and we need to recheck and see if we should
 	 * allocate.
 	 */
 	if (wait_for_alloc) {
 		mutex_unlock(&fs_info->chunk_mutex);
 		wait_for_alloc = 0;
 		goto again;
 	}
+
+	/*
+	 * Set the guard only past the wait_for_alloc recheck: every path
+	 * from this point on releases chunk_mutex at "out:", which clears
+	 * the guard again before unlocking.  Setting it before the recheck
+	 * would leave a stale pointer behind on the goto-again path and
+	 * trigger spurious -EDEADLK returns later.
+	 */
+	fs_info->chunk_alloc_task = current;
+	fs_info->prev_sinfo = space_info;
@@ -3655,6 +3680,9 @@ again:
 	space_info->chunk_alloc = 0;
 	spin_unlock(&space_info->lock);
 out:
+	/* drop the recursion guard before chunk_mutex is released */
+	fs_info->chunk_alloc_task = NULL;
+	fs_info->prev_sinfo = NULL;
 	mutex_unlock(&fs_info->chunk_mutex);
 	return ret;
 }
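
For reference, the guard pattern above can be exercised outside the
kernel. Below is a minimal userspace sketch (not part of the patch;
the names alloc_lock, alloc_owner, owner_valid and do_alloc are
illustrative only): remember which thread owns the mutex and fail fast
with EDEADLK on re-entry instead of self-deadlocking. Build with
"cc -pthread".

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_t alloc_owner;	/* valid only while owner_valid */
	static int owner_valid;		/* written only under alloc_lock */

	static int do_alloc(int depth)
	{
		/*
		 * Same test as the patch: the unlocked read can only match
		 * pthread_self() if this thread set alloc_owner itself,
		 * i.e. it is about to re-acquire a lock it already holds.
		 */
		if (owner_valid && pthread_equal(alloc_owner, pthread_self()))
			return EDEADLK;

		pthread_mutex_lock(&alloc_lock);
		alloc_owner = pthread_self();
		owner_valid = 1;

		int ret = 0;
		if (depth > 0)
			ret = do_alloc(depth - 1); /* nested call is refused */

		/* clear the guard before unlocking, as at "out:" above */
		owner_valid = 0;
		pthread_mutex_unlock(&alloc_lock);
		return ret;
	}

	int main(void)
	{
		printf("nested do_alloc() returned %d (EDEADLK is %d)\n",
		       do_alloc(1), EDEADLK);
		return 0;
	}

As in the patch, returning EDEADLK pushes the decision back to the
caller, which can fail the request instead of hanging. The trade-off
in both versions is that the unlocked read is formally a data race,
benign only for the self-recursion case being detected.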