diff --git a/block/backup.c b/block/backup.c
@@ -269,7 +269,10 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
return -ECANCELED;
}
+ /* rdlock protects the subsequent call to bdrv_is_allocated() */
+ bdrv_graph_co_rdlock();
ret = block_copy_reset_unallocated(s->bcs, offset, &count);
+ bdrv_graph_co_rdunlock();
if (ret < 0) {
return ret;
}
diff --git a/block/block-backend.c b/block/block-backend.c
@@ -1433,6 +1433,7 @@ int coroutine_fn blk_block_status_above(BlockBackend *blk,
BlockDriverState **file)
{
IO_CODE();
+ GRAPH_RDLOCK_GUARD();
return bdrv_block_status_above(blk_bs(blk), base, offset, bytes, pnum, map,
file);
}
@@ -1443,6 +1444,7 @@ int coroutine_fn blk_is_allocated_above(BlockBackend *blk,
int64_t bytes, int64_t *pnum)
{
IO_CODE();
+ GRAPH_RDLOCK_GUARD();
return bdrv_is_allocated_above(blk_bs(blk), base, include_base, offset,
bytes, pnum);
}
diff --git a/block/block-copy.c b/block/block-copy.c
@@ -630,6 +630,7 @@ static int coroutine_fn block_copy_is_cluster_allocated(BlockCopyState *s,
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
while (true) {
+ /* protected in backup_run() */
ret = bdrv_is_allocated(bs, offset, bytes, &count);
if (ret < 0) {
return ret;
@@ -892,6 +893,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
+ GRAPH_RDLOCK_GUARD();
block_copy_common(opaque);
}
diff --git a/block/io.c b/block/io.c
@@ -2215,6 +2215,7 @@ static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
bool has_filtered_child;
assert(pnum);
+ assert_bdrv_graph_readable();
*pnum = 0;
total_size = bdrv_getlength(bs);
if (total_size < 0) {
@@ -2445,6 +2446,7 @@ bdrv_co_common_block_status_above(BlockDriverState *bs,
IO_CODE();
assert(!include_base || base); /* Can't include NULL base */
+ assert_bdrv_graph_readable();
if (!depth) {
depth = &dummy;
diff --git a/block/mirror.c b/block/mirror.c
@@ -559,9 +559,11 @@ static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
MirrorMethod mirror_method = MIRROR_METHOD_COPY;
assert(!(offset % s->granularity));
- ret = bdrv_block_status_above(source, NULL, offset,
- nb_chunks * s->granularity,
- &io_bytes, NULL, NULL);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_block_status_above(source, NULL, offset,
+ nb_chunks * s->granularity,
+ &io_bytes, NULL, NULL);
+ }
if (ret < 0) {
io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
} else if (ret & BDRV_BLOCK_DATA) {
@@ -864,8 +866,10 @@ static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
return 0;
}
- ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
- &count);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset,
+ bytes, &count);
+ }
if (ret < 0) {
return ret;
}
diff --git a/block/stream.c b/block/stream.c
@@ -161,21 +161,25 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
copy = false;
- ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
- if (ret == 1) {
- /* Allocated in the top, no need to copy. */
- } else if (ret >= 0) {
- /* Copy if allocated in the intermediate images. Limit to the
- * known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE). */
- ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
- s->base_overlay, true,
- offset, n, &n);
- /* Finish early if end of backing file has been reached */
- if (ret == 0 && n == 0) {
- n = len - offset;
+ WITH_GRAPH_RDLOCK_GUARD() {
+ ret = bdrv_is_allocated(unfiltered_bs, offset, STREAM_CHUNK, &n);
+ if (ret == 1) {
+ /* Allocated in the top, no need to copy. */
+ } else if (ret >= 0) {
+ /*
+ * Copy if allocated in the intermediate images. Limit to the
+ * known-unallocated area [offset, offset+n*BDRV_SECTOR_SIZE).
+ */
+ ret = bdrv_is_allocated_above(bdrv_cow_bs(unfiltered_bs),
+ s->base_overlay, true,
+ offset, n, &n);
+ /* Finish early if end of backing file has been reached */
+ if (ret == 0 && n == 0) {
+ n = len - offset;
+ }
+
+ copy = (ret > 0);
}
-
- copy = (ret > 0);
}
trace_stream_one_iteration(s, offset, n, ret);
if (copy) {
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
@@ -623,6 +623,8 @@ struct BlockDriver {
* block/io.c's bdrv_co_block_status() will utilize an unclamped
* *pnum value for the block-status cache on protocol nodes, prior
* to clamping *pnum for return to its caller.
+ *
+ * Called with graph rdlock taken.
*/
int coroutine_fn (*bdrv_co_block_status)(BlockDriverState *bs,
bool want_zero, int64_t offset, int64_t bytes, int64_t *pnum,
diff --git a/qemu-img.c b/qemu-img.c
@@ -1977,7 +1977,9 @@ static void coroutine_fn convert_co_do_copy(void *opaque)
qemu_co_mutex_unlock(&s->lock);
break;
}
- n = convert_iteration_sectors(s, s->sector_num);
+ WITH_GRAPH_RDLOCK_GUARD() {
+ n = convert_iteration_sectors(s, s->sector_num);
+ }
if (n < 0) {
qemu_co_mutex_unlock(&s->lock);
s->ret = n;
This function, in addition to being called by a generated_co_wrapper, is also
called from other places. The strategy is to always take the lock in the
function that is invoked when the coroutine is created, to avoid recursive
locking. Protecting bdrv_co_block_status(), called by
bdrv_co_common_block_status_above(), implies that
BlockDriver->bdrv_co_block_status() is always called with the graph rdlock
taken.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
---
 block/backup.c                   |  3 +++
 block/block-backend.c            |  2 ++
 block/block-copy.c               |  2 ++
 block/io.c                       |  2 ++
 block/mirror.c                   | 14 +++++++++-----
 block/stream.c                   | 32 ++++++++++++++++++--------------
 include/block/block_int-common.h |  2 ++
 qemu-img.c                       |  4 +++-
 8 files changed, 41 insertions(+), 20 deletions(-)
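For illustration only, not part of the patch: the standalone sketch below
models the "lock once at the coroutine entry point, assert in the callees"
strategy with a plain pthread rwlock and a hypothetical thread-local depth
counter. The names graph_rdlock(), assert_graph_readable() and
coroutine_entry() are stand-ins for QEMU's bdrv_graph_co_rdlock(),
assert_bdrv_graph_readable() and the coroutine entry functions touched above;
this is an analogy under those assumptions, not the actual graph-lock
implementation.

/*
 * Illustrative analogy only -- not QEMU code.  A pthread rwlock and a
 * thread-local depth counter stand in for the block-graph rdlock.  The
 * coroutine entry point is the only place that takes the lock; the helpers
 * it calls merely assert that it is already held, so the lock is never
 * taken recursively.
 */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_rwlock_t graph_lock = PTHREAD_RWLOCK_INITIALIZER;
static _Thread_local int reader_depth;      /* hypothetical bookkeeping */

static void graph_rdlock(void)              /* ~ bdrv_graph_co_rdlock() */
{
    pthread_rwlock_rdlock(&graph_lock);
    reader_depth++;
}

static void graph_rdunlock(void)            /* ~ bdrv_graph_co_rdunlock() */
{
    reader_depth--;
    pthread_rwlock_unlock(&graph_lock);
}

static void assert_graph_readable(void)     /* ~ assert_bdrv_graph_readable() */
{
    assert(reader_depth > 0);
}

/* ~ bdrv_co_block_status(): a callee that only asserts, never locks */
static int block_status(int64_t offset)
{
    assert_graph_readable();
    return offset >= 0;
}

/* ~ the coroutine entry point: the single place where the lock is taken */
static void coroutine_entry(void)
{
    graph_rdlock();
    printf("status: %d\n", block_status(4096));
    graph_rdunlock();
}

int main(void)
{
    coroutine_entry();
    return 0;
}

The same shape appears in the patch itself: block_copy_async_co_entry(),
blk_block_status_above() and blk_is_allocated_above() take
GRAPH_RDLOCK_GUARD() once at the top, while bdrv_co_block_status() and
bdrv_co_common_block_status_above() only gain assert_bdrv_graph_readable().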