Message ID | 1571243333-882302-3-git-send-email-andrey.shinkevich@virtuozzo.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | qcow2: advanced compression options | expand |
16.10.2019 19:28, Andrey Shinkevich wrote: > QEMU currently supports writing compressed data of the size equal to > one cluster. This patch allows writing QCOW2 compressed data that > exceed one cluster. Now, we split buffered data into separate clusters > and write them compressed using the existing functionality. > > Suggested-by: Pavel Butsykin <pbutsykin@virtuozzo.com> > Signed-off-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com> > --- > block/qcow2.c | 102 ++++++++++++++++++++++++++++++++++++++++++---------------- > 1 file changed, 75 insertions(+), 27 deletions(-) > > diff --git a/block/qcow2.c b/block/qcow2.c > index 6b29e16..9a85d73 100644 > --- a/block/qcow2.c > +++ b/block/qcow2.c > @@ -4156,10 +4156,8 @@ fail: > return ret; > } > > -/* XXX: put compressed sectors first, then all the cluster aligned > - tables to avoid losing bytes in alignment */ > static coroutine_fn int > -qcow2_co_pwritev_compressed_part(BlockDriverState *bs, > +qcow2_co_pwritev_compressed_task(BlockDriverState *bs, > uint64_t offset, uint64_t bytes, > QEMUIOVector *qiov, size_t qiov_offset) > { > @@ -4169,32 +4167,11 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs, > uint8_t *buf, *out_buf; > uint64_t cluster_offset; > > - if (has_data_file(bs)) { > - return -ENOTSUP; > - } > - > - if (bytes == 0) { > - /* align end of file to a sector boundary to ease reading with > - sector based I/Os */ > - int64_t len = bdrv_getlength(bs->file->bs); > - if (len < 0) { > - return len; > - } > - return bdrv_co_truncate(bs->file, len, PREALLOC_MODE_OFF, NULL); > - } > - > - if (offset_into_cluster(s, offset)) { > - return -EINVAL; > - } > + assert(bytes == s->cluster_size || (bytes < s->cluster_size && > + (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS))); > > buf = qemu_blockalign(bs, s->cluster_size); > - if (bytes != s->cluster_size) { > - if (bytes > s->cluster_size || > - offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) > - { > - qemu_vfree(buf); > - return 
-EINVAL; > - } > + if (bytes < s->cluster_size) { > /* Zero-pad last write if image size is not cluster aligned */ > memset(buf + bytes, 0, s->cluster_size - bytes); > } > @@ -4243,6 +4220,77 @@ fail: > return ret; > } > > +static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task) > +{ > + Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); > + > + assert(!t->cluster_type && !t->l2meta); > + > + return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov, > + t->qiov_offset); > +} > + > +/* > + * XXX: put compressed sectors first, then all the cluster aligned > + tables to avoid losing bytes in alignment missed asterisk > + */ > +static coroutine_fn int > +qcow2_co_pwritev_compressed_part(BlockDriverState *bs, > + uint64_t offset, uint64_t bytes, > + QEMUIOVector *qiov, size_t qiov_offset) > +{ > + BDRVQcow2State *s = bs->opaque; > + AioTaskPool *aio = NULL; > + int ret; > + > + if (has_data_file(bs)) { > + return -ENOTSUP; > + } > + > + if (bytes == 0) { > + /* > + * align end of file to a sector boundary to ease reading with > + * sector based I/Os > + */ > + int64_t len = bdrv_getlength(bs->file->bs); > + if (len < 0) { > + return len; > + } > + return bdrv_co_truncate(bs->file, len, PREALLOC_MODE_OFF, NULL); > + } > + > + if (offset_into_cluster(s, offset)) { > + return -EINVAL; > + } > + > + while (bytes && aio_task_pool_status(aio) == 0) { > + uint32_t chunk_size = MIN(bytes, s->cluster_size); Hmm. cluster_size is int. bytes is uint64_t. qcow2_add_task argument type is uint64_t. I think better to choose from these types.. And, I'd prefer to use uint64_t for chunk_size.. But, uint32_t should work too, of course. 
> + > + if (!aio && chunk_size != bytes) { > + aio = aio_task_pool_new(QCOW2_MAX_WORKERS); > + } > + > + ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry, > + 0, 0, offset, chunk_size, qiov, qiov_offset, NULL); > + if (ret < 0) { > + break; > + } > + qiov_offset += chunk_size; > + offset += chunk_size; > + bytes -= chunk_size; > + } > + > + if (aio) { > + aio_task_pool_wait_all(aio); > + if (ret == 0) { > + ret = aio_task_pool_status(aio); > + } > + g_free(aio); > + } > + > + return ret; > +} > + > static int coroutine_fn > qcow2_co_preadv_compressed(BlockDriverState *bs, > uint64_t file_cluster_offset, > With asterisk in comment fixed, and optionally chunk_size type changed to uint64_t: Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com> Also, I'd prefer this patch to go first, otherwise we added in previous patch an option which doesn't work for requests larger than one cluster. Or, otherwise, you can in previous patch set max_transfer to one cluster in case of all_writes_compressed, and in this patch drop this restriction. (Note, that this patch is needed: if we just set max_transfer to one cluster instead for all_writes_compressed case, we'll miss benefits of aio_task_pool and will not compress clusters in parallel threads for one request).
diff --git a/block/qcow2.c b/block/qcow2.c index 6b29e16..9a85d73 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -4156,10 +4156,8 @@ fail: return ret; } -/* XXX: put compressed sectors first, then all the cluster aligned - tables to avoid losing bytes in alignment */ static coroutine_fn int -qcow2_co_pwritev_compressed_part(BlockDriverState *bs, +qcow2_co_pwritev_compressed_task(BlockDriverState *bs, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, size_t qiov_offset) { @@ -4169,32 +4167,11 @@ qcow2_co_pwritev_compressed_part(BlockDriverState *bs, uint8_t *buf, *out_buf; uint64_t cluster_offset; - if (has_data_file(bs)) { - return -ENOTSUP; - } - - if (bytes == 0) { - /* align end of file to a sector boundary to ease reading with - sector based I/Os */ - int64_t len = bdrv_getlength(bs->file->bs); - if (len < 0) { - return len; - } - return bdrv_co_truncate(bs->file, len, PREALLOC_MODE_OFF, NULL); - } - - if (offset_into_cluster(s, offset)) { - return -EINVAL; - } + assert(bytes == s->cluster_size || (bytes < s->cluster_size && + (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS))); buf = qemu_blockalign(bs, s->cluster_size); - if (bytes != s->cluster_size) { - if (bytes > s->cluster_size || - offset + bytes != bs->total_sectors << BDRV_SECTOR_BITS) - { - qemu_vfree(buf); - return -EINVAL; - } + if (bytes < s->cluster_size) { /* Zero-pad last write if image size is not cluster aligned */ memset(buf + bytes, 0, s->cluster_size - bytes); } @@ -4243,6 +4220,77 @@ fail: return ret; } +static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task) +{ + Qcow2AioTask *t = container_of(task, Qcow2AioTask, task); + + assert(!t->cluster_type && !t->l2meta); + + return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov, + t->qiov_offset); +} + +/* + * XXX: put compressed sectors first, then all the cluster aligned + tables to avoid losing bytes in alignment + */ +static coroutine_fn int 
+qcow2_co_pwritev_compressed_part(BlockDriverState *bs, + uint64_t offset, uint64_t bytes, + QEMUIOVector *qiov, size_t qiov_offset) +{ + BDRVQcow2State *s = bs->opaque; + AioTaskPool *aio = NULL; + int ret; + + if (has_data_file(bs)) { + return -ENOTSUP; + } + + if (bytes == 0) { + /* + * align end of file to a sector boundary to ease reading with + * sector based I/Os + */ + int64_t len = bdrv_getlength(bs->file->bs); + if (len < 0) { + return len; + } + return bdrv_co_truncate(bs->file, len, PREALLOC_MODE_OFF, NULL); + } + + if (offset_into_cluster(s, offset)) { + return -EINVAL; + } + + while (bytes && aio_task_pool_status(aio) == 0) { + uint32_t chunk_size = MIN(bytes, s->cluster_size); + + if (!aio && chunk_size != bytes) { + aio = aio_task_pool_new(QCOW2_MAX_WORKERS); + } + + ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry, + 0, 0, offset, chunk_size, qiov, qiov_offset, NULL); + if (ret < 0) { + break; + } + qiov_offset += chunk_size; + offset += chunk_size; + bytes -= chunk_size; + } + + if (aio) { + aio_task_pool_wait_all(aio); + if (ret == 0) { + ret = aio_task_pool_status(aio); + } + g_free(aio); + } + + return ret; +} + static int coroutine_fn qcow2_co_preadv_compressed(BlockDriverState *bs, uint64_t file_cluster_offset,
QEMU currently supports writing compressed data of the size equal to one cluster. This patch allows writing QCOW2 compressed data that exceed one cluster. Now, we split buffered data into separate clusters and write them compressed using the existing functionality. Suggested-by: Pavel Butsykin <pbutsykin@virtuozzo.com> Signed-off-by: Andrey Shinkevich <andrey.shinkevich@virtuozzo.com> --- block/qcow2.c | 102 ++++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 75 insertions(+), 27 deletions(-)