@@ -196,12 +196,13 @@ BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                             BlockCompletionFunc *cb, void *opaque,
                             JobTxn *txn, Error **errp);
-BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
-                                  const char *child_name,
-                                  const BdrvChildClass *child_class,
-                                  BdrvChildRole child_role,
-                                  uint64_t perm, uint64_t shared_perm,
-                                  void *opaque, Error **errp);
+BdrvChild * GRAPH_WRLOCK
+bdrv_root_attach_child(BlockDriverState *child_bs, const char *child_name,
+                       const BdrvChildClass *child_class,
+                       BdrvChildRole child_role,
+                       uint64_t perm, uint64_t shared_perm,
+                       void *opaque, Error **errp);
+
 void GRAPH_WRLOCK bdrv_root_unref_child(BdrvChild *child);
 void GRAPH_RDLOCK bdrv_get_cumulative_perm(BlockDriverState *bs, uint64_t *perm,
@@ -3214,8 +3214,6 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
     GLOBAL_STATE_CODE();
-    bdrv_graph_wrlock(child_bs);
-
     child = bdrv_attach_child_common(child_bs, child_name, child_class,
                                      child_role, perm, shared_perm, opaque,
                                      tran, errp);
@@ -3228,9 +3226,8 @@ BdrvChild *bdrv_root_attach_child(BlockDriverState *child_bs,
 out:
     tran_finalize(tran, ret);
-    bdrv_graph_wrunlock();
-    bdrv_unref(child_bs);
+    bdrv_schedule_unref(child_bs);
     return ret < 0 ? NULL : child;
 }
@@ -931,10 +931,12 @@ int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
     ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
     GLOBAL_STATE_CODE();
     bdrv_ref(bs);
+    bdrv_graph_wrlock(bs);
     blk->root = bdrv_root_attach_child(bs, "root", &child_root,
                                        BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
                                        blk->perm, blk->shared_perm,
                                        blk, errp);
+    bdrv_graph_wrunlock();
     if (blk->root == NULL) {
         return -EPERM;
     }
@@ -248,8 +248,10 @@ int block_job_add_bdrv(BlockJob *job, const char *name, BlockDriverState *bs,
         }
         aio_context_acquire(ctx);
     }
+    bdrv_graph_wrlock(bs);
     c = bdrv_root_attach_child(bs, name, &child_job, 0, perm, shared_perm, job,
                                errp);
+    bdrv_graph_wrunlock();
     if (need_context_ops) {
         aio_context_release(ctx);
         if (job->job.aio_context != qemu_get_aio_context()) {
Instead of taking the writer lock internally, require callers to already hold
it when calling bdrv_root_attach_child(). These callers will typically already
hold the graph lock once the locking work is completed, which means that they
can't call functions that take it internally.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
---
 include/block/block_int-global-state.h | 13 +++++++------
 block.c                                |  5 +----
 block/block-backend.c                  |  2 ++
 blockjob.c                             |  2 ++
 4 files changed, 12 insertions(+), 10 deletions(-)
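
As an illustration of the new convention, here is a minimal caller sketch
modeled on the blk_insert_bs() and block_job_add_bdrv() hunks above. It is not
part of the patch: ExampleDevice, example_attach_root() and example_child_class
are hypothetical stand-ins invented for this sketch (a real caller supplies its
own BdrvChildClass, such as child_root or child_job), and it assumes QEMU's
internal block headers.

/*
 * Hypothetical caller sketch: with bdrv_root_attach_child() marked
 * GRAPH_WRLOCK, the caller takes the graph writer lock itself instead of
 * relying on the callee to lock internally.
 */
#include "block/block_int.h"

/*
 * Placeholder child class for the sketch; real callers pass e.g. &child_root
 * (block-backend.c) or &child_job (blockjob.c).
 */
extern const BdrvChildClass example_child_class;

typedef struct ExampleDevice {
    BdrvChild *root;
    uint64_t perm;
    uint64_t shared_perm;
} ExampleDevice;

static int example_attach_root(ExampleDevice *dev, BlockDriverState *bs,
                               Error **errp)
{
    bdrv_ref(bs);              /* this reference is consumed by the attach */
    bdrv_graph_wrlock(bs);     /* writer lock now taken by the caller */
    dev->root = bdrv_root_attach_child(bs, "root", &example_child_class,
                                       BDRV_CHILD_DATA, /* role chosen for illustration */
                                       dev->perm, dev->shared_perm,
                                       dev, errp);
    bdrv_graph_wrunlock();

    if (dev->root == NULL) {
        return -EPERM;         /* errp was set by the failed attach */
    }
    return 0;
}

The important change for callers is the bdrv_graph_wrlock()/bdrv_graph_wrunlock()
pair around the call; bdrv_root_attach_child() itself no longer touches the lock
and defers dropping the caller's reference via bdrv_schedule_unref().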