Message ID | 20230831093252.2461282-10-ray.huang@amd.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | Support blob memory and venus on qemu | expand |
On 2023/08/31 18:32, Huang Rui wrote: > From: Antonio Caggiano <antonio.caggiano@collabora.com> > > Support BLOB resources creation, mapping and unmapping by calling the > new stable virglrenderer 0.10 interface. Only enabled when available and > via the blob config. E.g. -device virtio-vga-gl,blob=true > > Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> > Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> > Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> > Signed-off-by: Huang Rui <ray.huang@amd.com> > --- > > v1->v2: > - Remove unused #include "hw/virtio/virtio-iommu.h" > > - Add a local function, called virgl_resource_destroy(), that is used > to release a vgpu resource on error paths and in resource_unref. > > - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), > since this function won't be called on blob resources and also because > blob resources are unmapped via virgl_cmd_resource_unmap_blob(). > > - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths > and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource > has been fully initialized. > > - Memory region has a different life-cycle from virtio gpu resources > i.e. cannot be released synchronously along with the vgpu resource. > So, here the field "region" was changed to a pointer that will be > released automatically once the memory region is unparented and all > of its references have been released. > Also, since the pointer can be used to indicate whether the blob > is mapped, the explicit field "mapped" was removed. > > - In virgl_cmd_resource_map_blob(), add check on the value of > res->region, to prevent beeing called twice on the same resource. > > - Remove direct references to parent_obj. > > - Separate declarations from code. 
> > hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ > hw/display/virtio-gpu.c | 4 +- > include/hw/virtio/virtio-gpu.h | 5 + > meson.build | 4 + > 4 files changed, 225 insertions(+), 1 deletion(-) > > diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c > index 312953ec16..17b634d4ee 100644 > --- a/hw/display/virtio-gpu-virgl.c > +++ b/hw/display/virtio-gpu-virgl.c > @@ -17,6 +17,7 @@ > #include "trace.h" > #include "hw/virtio/virtio.h" > #include "hw/virtio/virtio-gpu.h" > +#include "hw/virtio/virtio-gpu-bswap.h" > > #include "ui/egl-helpers.h" > > @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, > virgl_renderer_resource_create(&args, NULL, 0); > } > > +static void virgl_resource_destroy(VirtIOGPU *g, > + struct virtio_gpu_simple_resource *res) > +{ > + if (!res) > + return; > + > + QTAILQ_REMOVE(&g->reslist, res, next); > + > + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); > + g_free(res->addrs); > + > + g_free(res); > +} > + > static void virgl_cmd_resource_unref(VirtIOGPU *g, > struct virtio_gpu_ctrl_command *cmd) > { > + struct virtio_gpu_simple_resource *res; > struct virtio_gpu_resource_unref unref; > struct iovec *res_iovs = NULL; > int num_iovs = 0; > @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, > VIRTIO_GPU_FILL_CMD(unref); > trace_virtio_gpu_cmd_res_unref(unref.resource_id); > > + res = virtio_gpu_find_resource(g, unref.resource_id); > + > virgl_renderer_resource_detach_iov(unref.resource_id, > &res_iovs, > &num_iovs); > if (res_iovs != NULL && num_iovs != 0) { > virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); > + if (res) { > + res->iov = NULL; > + res->iov_cnt = 0; > + } > } > + > virgl_renderer_resource_unref(unref.resource_id); > + > + virgl_resource_destroy(g, res); > } > > static void virgl_cmd_context_create(VirtIOGPU *g, > @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, > g_free(resp); > } > > +#ifdef 
HAVE_VIRGL_RESOURCE_BLOB > + > +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, > + struct virtio_gpu_ctrl_command *cmd) > +{ > + struct virtio_gpu_simple_resource *res; > + struct virtio_gpu_resource_create_blob cblob; > + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; > + int ret; > + > + VIRTIO_GPU_FILL_CMD(cblob); > + virtio_gpu_create_blob_bswap(&cblob); > + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); > + > + if (cblob.resource_id == 0) { > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > + __func__); > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > + return; > + } > + > + res = virtio_gpu_find_resource(g, cblob.resource_id); > + if (res) { > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", > + __func__, cblob.resource_id); > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > + return; > + } > + > + res = g_new0(struct virtio_gpu_simple_resource, 1); > + if (!res) { > + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > + return; > + } > + > + res->resource_id = cblob.resource_id; > + res->blob_size = cblob.size; > + > + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { > + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), > + cmd, &res->addrs, &res->iov, > + &res->iov_cnt); > + if (!ret) { > + g_free(res); > + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > + return; > + } > + } > + > + QTAILQ_INSERT_HEAD(&g->reslist, res, next); > + > + virgl_args.res_handle = cblob.resource_id; > + virgl_args.ctx_id = cblob.hdr.ctx_id; > + virgl_args.blob_mem = cblob.blob_mem; > + virgl_args.blob_id = cblob.blob_id; > + virgl_args.blob_flags = cblob.blob_flags; > + virgl_args.size = cblob.size; > + virgl_args.iovecs = res->iov; > + virgl_args.num_iovs = res->iov_cnt; > + > + ret = virgl_renderer_resource_create_blob(&virgl_args); > + if (ret) { > + virgl_resource_destroy(g, res); > + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create 
error: %s\n", > + __func__, strerror(-ret)); > + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > + } > +} > + > +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, > + struct virtio_gpu_ctrl_command *cmd) > +{ > + struct virtio_gpu_simple_resource *res; > + struct virtio_gpu_resource_map_blob mblob; > + int ret; > + void *data; > + uint64_t size; > + struct virtio_gpu_resp_map_info resp; > + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); > + > + VIRTIO_GPU_FILL_CMD(mblob); > + virtio_gpu_map_blob_bswap(&mblob); > + > + if (mblob.resource_id == 0) { > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > + __func__); > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > + return; > + } > + > + res = virtio_gpu_find_resource(g, mblob.resource_id); > + if (!res) { > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", > + __func__, mblob.resource_id); > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > + return; > + } > + if (res->region) { > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", > + __func__, mblob.resource_id); > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > + return; > + } > + > + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); > + if (ret) { > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", > + __func__, strerror(-ret)); > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > + return; > + } > + > + res->region = g_new0(MemoryRegion, 1); > + if (!res->region) { > + virgl_renderer_resource_unmap(res->resource_id); > + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > + return; > + } > + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); I think memory_region_init_ram_ptr() should be used instead.
On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote: > On 2023/08/31 18:32, Huang Rui wrote: > > From: Antonio Caggiano <antonio.caggiano@collabora.com> > > > > Support BLOB resources creation, mapping and unmapping by calling the > > new stable virglrenderer 0.10 interface. Only enabled when available and > > via the blob config. E.g. -device virtio-vga-gl,blob=true > > > > Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> > > Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> > > Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> > > Signed-off-by: Huang Rui <ray.huang@amd.com> > > --- > > > > v1->v2: > > - Remove unused #include "hw/virtio/virtio-iommu.h" > > > > - Add a local function, called virgl_resource_destroy(), that is used > > to release a vgpu resource on error paths and in resource_unref. > > > > - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), > > since this function won't be called on blob resources and also because > > blob resources are unmapped via virgl_cmd_resource_unmap_blob(). > > > > - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths > > and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource > > has been fully initialized. > > > > - Memory region has a different life-cycle from virtio gpu resources > > i.e. cannot be released synchronously along with the vgpu resource. > > So, here the field "region" was changed to a pointer that will be > > released automatically once the memory region is unparented and all > > of its references have been released. > > Also, since the pointer can be used to indicate whether the blob > > is mapped, the explicit field "mapped" was removed. > > > > - In virgl_cmd_resource_map_blob(), add check on the value of > > res->region, to prevent beeing called twice on the same resource. > > > > - Remove direct references to parent_obj. > > > > - Separate declarations from code. 
> > > > hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ > > hw/display/virtio-gpu.c | 4 +- > > include/hw/virtio/virtio-gpu.h | 5 + > > meson.build | 4 + > > 4 files changed, 225 insertions(+), 1 deletion(-) > > > > diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c > > index 312953ec16..17b634d4ee 100644 > > --- a/hw/display/virtio-gpu-virgl.c > > +++ b/hw/display/virtio-gpu-virgl.c > > @@ -17,6 +17,7 @@ > > #include "trace.h" > > #include "hw/virtio/virtio.h" > > #include "hw/virtio/virtio-gpu.h" > > +#include "hw/virtio/virtio-gpu-bswap.h" > > > > #include "ui/egl-helpers.h" > > > > @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, > > virgl_renderer_resource_create(&args, NULL, 0); > > } > > > > +static void virgl_resource_destroy(VirtIOGPU *g, > > + struct virtio_gpu_simple_resource *res) > > +{ > > + if (!res) > > + return; > > + > > + QTAILQ_REMOVE(&g->reslist, res, next); > > + > > + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); > > + g_free(res->addrs); > > + > > + g_free(res); > > +} > > + > > static void virgl_cmd_resource_unref(VirtIOGPU *g, > > struct virtio_gpu_ctrl_command *cmd) > > { > > + struct virtio_gpu_simple_resource *res; > > struct virtio_gpu_resource_unref unref; > > struct iovec *res_iovs = NULL; > > int num_iovs = 0; > > @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, > > VIRTIO_GPU_FILL_CMD(unref); > > trace_virtio_gpu_cmd_res_unref(unref.resource_id); > > > > + res = virtio_gpu_find_resource(g, unref.resource_id); > > + > > virgl_renderer_resource_detach_iov(unref.resource_id, > > &res_iovs, > > &num_iovs); > > if (res_iovs != NULL && num_iovs != 0) { > > virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); > > + if (res) { > > + res->iov = NULL; > > + res->iov_cnt = 0; > > + } > > } > > + > > virgl_renderer_resource_unref(unref.resource_id); > > + > > + virgl_resource_destroy(g, res); > > } > > > > static void 
virgl_cmd_context_create(VirtIOGPU *g, > > @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, > > g_free(resp); > > } > > > > +#ifdef HAVE_VIRGL_RESOURCE_BLOB > > + > > +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, > > + struct virtio_gpu_ctrl_command *cmd) > > +{ > > + struct virtio_gpu_simple_resource *res; > > + struct virtio_gpu_resource_create_blob cblob; > > + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; > > + int ret; > > + > > + VIRTIO_GPU_FILL_CMD(cblob); > > + virtio_gpu_create_blob_bswap(&cblob); > > + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); > > + > > + if (cblob.resource_id == 0) { > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > > + __func__); > > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > > + return; > > + } > > + > > + res = virtio_gpu_find_resource(g, cblob.resource_id); > > + if (res) { > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", > > + __func__, cblob.resource_id); > > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > > + return; > > + } > > + > > + res = g_new0(struct virtio_gpu_simple_resource, 1); > > + if (!res) { > > + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > > + return; > > + } > > + > > + res->resource_id = cblob.resource_id; > > + res->blob_size = cblob.size; > > + > > + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { > > + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), > > + cmd, &res->addrs, &res->iov, > > + &res->iov_cnt); > > + if (!ret) { > > + g_free(res); > > + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > > + return; > > + } > > + } > > + > > + QTAILQ_INSERT_HEAD(&g->reslist, res, next); > > + > > + virgl_args.res_handle = cblob.resource_id; > > + virgl_args.ctx_id = cblob.hdr.ctx_id; > > + virgl_args.blob_mem = cblob.blob_mem; > > + virgl_args.blob_id = cblob.blob_id; > > + virgl_args.blob_flags = cblob.blob_flags; > > + virgl_args.size = 
cblob.size; > > + virgl_args.iovecs = res->iov; > > + virgl_args.num_iovs = res->iov_cnt; > > + > > + ret = virgl_renderer_resource_create_blob(&virgl_args); > > + if (ret) { > > + virgl_resource_destroy(g, res); > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", > > + __func__, strerror(-ret)); > > + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > > + } > > +} > > + > > +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, > > + struct virtio_gpu_ctrl_command *cmd) > > +{ > > + struct virtio_gpu_simple_resource *res; > > + struct virtio_gpu_resource_map_blob mblob; > > + int ret; > > + void *data; > > + uint64_t size; > > + struct virtio_gpu_resp_map_info resp; > > + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); > > + > > + VIRTIO_GPU_FILL_CMD(mblob); > > + virtio_gpu_map_blob_bswap(&mblob); > > + > > + if (mblob.resource_id == 0) { > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > > + __func__); > > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > > + return; > > + } > > + > > + res = virtio_gpu_find_resource(g, mblob.resource_id); > > + if (!res) { > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", > > + __func__, mblob.resource_id); > > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > > + return; > > + } > > + if (res->region) { > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", > > + __func__, mblob.resource_id); > > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > > + return; > > + } > > + > > + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); > > + if (ret) { > > + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", > > + __func__, strerror(-ret)); > > + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > > + return; > > + } > > + > > + res->region = g_new0(MemoryRegion, 1); > > + if (!res->region) { > > + virgl_renderer_resource_unmap(res->resource_id); > > + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > > + 
return; > > + } > > + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); > > I think memory_region_init_ram_ptr() should be used instead. Would you mind to explain the reason? Thanks, Ray
On 2023/09/05 18:08, Huang Rui wrote: > On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote: >> On 2023/08/31 18:32, Huang Rui wrote: >>> From: Antonio Caggiano <antonio.caggiano@collabora.com> >>> >>> Support BLOB resources creation, mapping and unmapping by calling the >>> new stable virglrenderer 0.10 interface. Only enabled when available and >>> via the blob config. E.g. -device virtio-vga-gl,blob=true >>> >>> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> >>> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> >>> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> >>> Signed-off-by: Huang Rui <ray.huang@amd.com> >>> --- >>> >>> v1->v2: >>> - Remove unused #include "hw/virtio/virtio-iommu.h" >>> >>> - Add a local function, called virgl_resource_destroy(), that is used >>> to release a vgpu resource on error paths and in resource_unref. >>> >>> - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), >>> since this function won't be called on blob resources and also because >>> blob resources are unmapped via virgl_cmd_resource_unmap_blob(). >>> >>> - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths >>> and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource >>> has been fully initialized. >>> >>> - Memory region has a different life-cycle from virtio gpu resources >>> i.e. cannot be released synchronously along with the vgpu resource. >>> So, here the field "region" was changed to a pointer that will be >>> released automatically once the memory region is unparented and all >>> of its references have been released. >>> Also, since the pointer can be used to indicate whether the blob >>> is mapped, the explicit field "mapped" was removed. >>> >>> - In virgl_cmd_resource_map_blob(), add check on the value of >>> res->region, to prevent beeing called twice on the same resource. >>> >>> - Remove direct references to parent_obj. 
>>> >>> - Separate declarations from code. >>> >>> hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ >>> hw/display/virtio-gpu.c | 4 +- >>> include/hw/virtio/virtio-gpu.h | 5 + >>> meson.build | 4 + >>> 4 files changed, 225 insertions(+), 1 deletion(-) >>> >>> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c >>> index 312953ec16..17b634d4ee 100644 >>> --- a/hw/display/virtio-gpu-virgl.c >>> +++ b/hw/display/virtio-gpu-virgl.c >>> @@ -17,6 +17,7 @@ >>> #include "trace.h" >>> #include "hw/virtio/virtio.h" >>> #include "hw/virtio/virtio-gpu.h" >>> +#include "hw/virtio/virtio-gpu-bswap.h" >>> >>> #include "ui/egl-helpers.h" >>> >>> @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, >>> virgl_renderer_resource_create(&args, NULL, 0); >>> } >>> >>> +static void virgl_resource_destroy(VirtIOGPU *g, >>> + struct virtio_gpu_simple_resource *res) >>> +{ >>> + if (!res) >>> + return; >>> + >>> + QTAILQ_REMOVE(&g->reslist, res, next); >>> + >>> + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); >>> + g_free(res->addrs); >>> + >>> + g_free(res); >>> +} >>> + >>> static void virgl_cmd_resource_unref(VirtIOGPU *g, >>> struct virtio_gpu_ctrl_command *cmd) >>> { >>> + struct virtio_gpu_simple_resource *res; >>> struct virtio_gpu_resource_unref unref; >>> struct iovec *res_iovs = NULL; >>> int num_iovs = 0; >>> @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, >>> VIRTIO_GPU_FILL_CMD(unref); >>> trace_virtio_gpu_cmd_res_unref(unref.resource_id); >>> >>> + res = virtio_gpu_find_resource(g, unref.resource_id); >>> + >>> virgl_renderer_resource_detach_iov(unref.resource_id, >>> &res_iovs, >>> &num_iovs); >>> if (res_iovs != NULL && num_iovs != 0) { >>> virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); >>> + if (res) { >>> + res->iov = NULL; >>> + res->iov_cnt = 0; >>> + } >>> } >>> + >>> virgl_renderer_resource_unref(unref.resource_id); >>> + >>> + virgl_resource_destroy(g, res); 
>>> } >>> >>> static void virgl_cmd_context_create(VirtIOGPU *g, >>> @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, >>> g_free(resp); >>> } >>> >>> +#ifdef HAVE_VIRGL_RESOURCE_BLOB >>> + >>> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, >>> + struct virtio_gpu_ctrl_command *cmd) >>> +{ >>> + struct virtio_gpu_simple_resource *res; >>> + struct virtio_gpu_resource_create_blob cblob; >>> + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; >>> + int ret; >>> + >>> + VIRTIO_GPU_FILL_CMD(cblob); >>> + virtio_gpu_create_blob_bswap(&cblob); >>> + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); >>> + >>> + if (cblob.resource_id == 0) { >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", >>> + __func__); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>> + return; >>> + } >>> + >>> + res = virtio_gpu_find_resource(g, cblob.resource_id); >>> + if (res) { >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", >>> + __func__, cblob.resource_id); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>> + return; >>> + } >>> + >>> + res = g_new0(struct virtio_gpu_simple_resource, 1); >>> + if (!res) { >>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; >>> + return; >>> + } >>> + >>> + res->resource_id = cblob.resource_id; >>> + res->blob_size = cblob.size; >>> + >>> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { >>> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), >>> + cmd, &res->addrs, &res->iov, >>> + &res->iov_cnt); >>> + if (!ret) { >>> + g_free(res); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; >>> + return; >>> + } >>> + } >>> + >>> + QTAILQ_INSERT_HEAD(&g->reslist, res, next); >>> + >>> + virgl_args.res_handle = cblob.resource_id; >>> + virgl_args.ctx_id = cblob.hdr.ctx_id; >>> + virgl_args.blob_mem = cblob.blob_mem; >>> + virgl_args.blob_id = cblob.blob_id; >>> + virgl_args.blob_flags = 
cblob.blob_flags; >>> + virgl_args.size = cblob.size; >>> + virgl_args.iovecs = res->iov; >>> + virgl_args.num_iovs = res->iov_cnt; >>> + >>> + ret = virgl_renderer_resource_create_blob(&virgl_args); >>> + if (ret) { >>> + virgl_resource_destroy(g, res); >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", >>> + __func__, strerror(-ret)); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; >>> + } >>> +} >>> + >>> +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, >>> + struct virtio_gpu_ctrl_command *cmd) >>> +{ >>> + struct virtio_gpu_simple_resource *res; >>> + struct virtio_gpu_resource_map_blob mblob; >>> + int ret; >>> + void *data; >>> + uint64_t size; >>> + struct virtio_gpu_resp_map_info resp; >>> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); >>> + >>> + VIRTIO_GPU_FILL_CMD(mblob); >>> + virtio_gpu_map_blob_bswap(&mblob); >>> + >>> + if (mblob.resource_id == 0) { >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", >>> + __func__); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>> + return; >>> + } >>> + >>> + res = virtio_gpu_find_resource(g, mblob.resource_id); >>> + if (!res) { >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", >>> + __func__, mblob.resource_id); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>> + return; >>> + } >>> + if (res->region) { >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", >>> + __func__, mblob.resource_id); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>> + return; >>> + } >>> + >>> + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); >>> + if (ret) { >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", >>> + __func__, strerror(-ret)); >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>> + return; >>> + } >>> + >>> + res->region = g_new0(MemoryRegion, 1); >>> + if (!res->region) { >>> + virgl_renderer_resource_unmap(res->resource_id); >>> + cmd->error = 
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; >>> + return; >>> + } >>> + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); >> >> I think memory_region_init_ram_ptr() should be used instead. > > Would you mind to explain the reason? The documentation comment of memory_region_init_ram_device_ptr() says: > A RAM device represents a mapping to a physical device, such as to a > PCI MMIO BAR of an vfio-pci assigned device. The memory region may be > mapped into the VM address space and access to the region will modify > memory directly. However, the memory region should not be included in > a memory dump (device may not be enabled/mapped at the time of the > dump), and operations incompatible with manipulating MMIO should be > avoided. Replaces skip_dump flag. In my understanding it's not MMIO so memory_region_init_ram_ptr() should be used instead. Regards, Akihiko Odaki
On Tue, Sep 05, 2023 at 05:20:43PM +0800, Akihiko Odaki wrote: > On 2023/09/05 18:08, Huang Rui wrote: > > On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote: > >> On 2023/08/31 18:32, Huang Rui wrote: > >>> From: Antonio Caggiano <antonio.caggiano@collabora.com> > >>> > >>> Support BLOB resources creation, mapping and unmapping by calling the > >>> new stable virglrenderer 0.10 interface. Only enabled when available and > >>> via the blob config. E.g. -device virtio-vga-gl,blob=true > >>> > >>> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> > >>> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> > >>> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> > >>> Signed-off-by: Huang Rui <ray.huang@amd.com> > >>> --- > >>> > >>> v1->v2: > >>> - Remove unused #include "hw/virtio/virtio-iommu.h" > >>> > >>> - Add a local function, called virgl_resource_destroy(), that is used > >>> to release a vgpu resource on error paths and in resource_unref. > >>> > >>> - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), > >>> since this function won't be called on blob resources and also because > >>> blob resources are unmapped via virgl_cmd_resource_unmap_blob(). > >>> > >>> - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths > >>> and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource > >>> has been fully initialized. > >>> > >>> - Memory region has a different life-cycle from virtio gpu resources > >>> i.e. cannot be released synchronously along with the vgpu resource. > >>> So, here the field "region" was changed to a pointer that will be > >>> released automatically once the memory region is unparented and all > >>> of its references have been released. > >>> Also, since the pointer can be used to indicate whether the blob > >>> is mapped, the explicit field "mapped" was removed. 
> >>> > >>> - In virgl_cmd_resource_map_blob(), add check on the value of > >>> res->region, to prevent beeing called twice on the same resource. > >>> > >>> - Remove direct references to parent_obj. > >>> > >>> - Separate declarations from code. > >>> > >>> hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ > >>> hw/display/virtio-gpu.c | 4 +- > >>> include/hw/virtio/virtio-gpu.h | 5 + > >>> meson.build | 4 + > >>> 4 files changed, 225 insertions(+), 1 deletion(-) > >>> > >>> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c > >>> index 312953ec16..17b634d4ee 100644 > >>> --- a/hw/display/virtio-gpu-virgl.c > >>> +++ b/hw/display/virtio-gpu-virgl.c > >>> @@ -17,6 +17,7 @@ > >>> #include "trace.h" > >>> #include "hw/virtio/virtio.h" > >>> #include "hw/virtio/virtio-gpu.h" > >>> +#include "hw/virtio/virtio-gpu-bswap.h" > >>> > >>> #include "ui/egl-helpers.h" > >>> > >>> @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, > >>> virgl_renderer_resource_create(&args, NULL, 0); > >>> } > >>> > >>> +static void virgl_resource_destroy(VirtIOGPU *g, > >>> + struct virtio_gpu_simple_resource *res) > >>> +{ > >>> + if (!res) > >>> + return; > >>> + > >>> + QTAILQ_REMOVE(&g->reslist, res, next); > >>> + > >>> + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); > >>> + g_free(res->addrs); > >>> + > >>> + g_free(res); > >>> +} > >>> + > >>> static void virgl_cmd_resource_unref(VirtIOGPU *g, > >>> struct virtio_gpu_ctrl_command *cmd) > >>> { > >>> + struct virtio_gpu_simple_resource *res; > >>> struct virtio_gpu_resource_unref unref; > >>> struct iovec *res_iovs = NULL; > >>> int num_iovs = 0; > >>> @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, > >>> VIRTIO_GPU_FILL_CMD(unref); > >>> trace_virtio_gpu_cmd_res_unref(unref.resource_id); > >>> > >>> + res = virtio_gpu_find_resource(g, unref.resource_id); > >>> + > >>> virgl_renderer_resource_detach_iov(unref.resource_id, > >>> 
&res_iovs, > >>> &num_iovs); > >>> if (res_iovs != NULL && num_iovs != 0) { > >>> virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); > >>> + if (res) { > >>> + res->iov = NULL; > >>> + res->iov_cnt = 0; > >>> + } > >>> } > >>> + > >>> virgl_renderer_resource_unref(unref.resource_id); > >>> + > >>> + virgl_resource_destroy(g, res); > >>> } > >>> > >>> static void virgl_cmd_context_create(VirtIOGPU *g, > >>> @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, > >>> g_free(resp); > >>> } > >>> > >>> +#ifdef HAVE_VIRGL_RESOURCE_BLOB > >>> + > >>> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, > >>> + struct virtio_gpu_ctrl_command *cmd) > >>> +{ > >>> + struct virtio_gpu_simple_resource *res; > >>> + struct virtio_gpu_resource_create_blob cblob; > >>> + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; > >>> + int ret; > >>> + > >>> + VIRTIO_GPU_FILL_CMD(cblob); > >>> + virtio_gpu_create_blob_bswap(&cblob); > >>> + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); > >>> + > >>> + if (cblob.resource_id == 0) { > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > >>> + __func__); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>> + return; > >>> + } > >>> + > >>> + res = virtio_gpu_find_resource(g, cblob.resource_id); > >>> + if (res) { > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", > >>> + __func__, cblob.resource_id); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>> + return; > >>> + } > >>> + > >>> + res = g_new0(struct virtio_gpu_simple_resource, 1); > >>> + if (!res) { > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > >>> + return; > >>> + } > >>> + > >>> + res->resource_id = cblob.resource_id; > >>> + res->blob_size = cblob.size; > >>> + > >>> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { > >>> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), > >>> + cmd, &res->addrs, 
&res->iov, > >>> + &res->iov_cnt); > >>> + if (!ret) { > >>> + g_free(res); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > >>> + return; > >>> + } > >>> + } > >>> + > >>> + QTAILQ_INSERT_HEAD(&g->reslist, res, next); > >>> + > >>> + virgl_args.res_handle = cblob.resource_id; > >>> + virgl_args.ctx_id = cblob.hdr.ctx_id; > >>> + virgl_args.blob_mem = cblob.blob_mem; > >>> + virgl_args.blob_id = cblob.blob_id; > >>> + virgl_args.blob_flags = cblob.blob_flags; > >>> + virgl_args.size = cblob.size; > >>> + virgl_args.iovecs = res->iov; > >>> + virgl_args.num_iovs = res->iov_cnt; > >>> + > >>> + ret = virgl_renderer_resource_create_blob(&virgl_args); > >>> + if (ret) { > >>> + virgl_resource_destroy(g, res); > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", > >>> + __func__, strerror(-ret)); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > >>> + } > >>> +} > >>> + > >>> +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, > >>> + struct virtio_gpu_ctrl_command *cmd) > >>> +{ > >>> + struct virtio_gpu_simple_resource *res; > >>> + struct virtio_gpu_resource_map_blob mblob; > >>> + int ret; > >>> + void *data; > >>> + uint64_t size; > >>> + struct virtio_gpu_resp_map_info resp; > >>> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); > >>> + > >>> + VIRTIO_GPU_FILL_CMD(mblob); > >>> + virtio_gpu_map_blob_bswap(&mblob); > >>> + > >>> + if (mblob.resource_id == 0) { > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > >>> + __func__); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>> + return; > >>> + } > >>> + > >>> + res = virtio_gpu_find_resource(g, mblob.resource_id); > >>> + if (!res) { > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", > >>> + __func__, mblob.resource_id); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>> + return; > >>> + } > >>> + if (res->region) { > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", > >>> + __func__, 
mblob.resource_id); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>> + return; > >>> + } > >>> + > >>> + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); > >>> + if (ret) { > >>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", > >>> + __func__, strerror(-ret)); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>> + return; > >>> + } > >>> + > >>> + res->region = g_new0(MemoryRegion, 1); > >>> + if (!res->region) { > >>> + virgl_renderer_resource_unmap(res->resource_id); > >>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > >>> + return; > >>> + } > >>> + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); > >> > >> I think memory_region_init_ram_ptr() should be used instead. > > > > Would you mind to explain the reason? > > The documentation comment of memory_region_init_ram_device_ptr() says: > > A RAM device represents a mapping to a physical device, such as to a > > PCI MMIO BAR of an vfio-pci assigned device. The memory region may be > > mapped into the VM address space and access to the region will modify > > memory directly. However, the memory region should not be included in > > a memory dump (device may not be enabled/mapped at the time of the > > dump), and operations incompatible with manipulating MMIO should be > > avoided. Replaces skip_dump flag. > > In my understanding it's not MMIO so memory_region_init_ram_ptr() should > be used instead. > It actually maybe the video memory (mmio) or system memory here. :-) We will get the host memory for blob from host with virgl_renderer_resource_map() in virglrenderer. In virglrenderer, there are two types of VIRGL_RESOURCE_FD_DMABUF and VIRGL_RESOURCE_FD_SHM to indicate the memory types. The shmem is the system memory that won't need GPU accessible, and dmabuf is the memory that required GPU accessible. 
Host kernel amdgpu driver will register dma-buf to export the resource buffer for sharing, and here, it may have video memory that is exposed by amdgpu pcie bar0 in the dma-buf buffers. And we also have system memory(gtt) that can be mapped via gpu page tables for gpu access. 07:00.0 VGA compatible controller: Advanced Micro Devices, Inc. [AMD/ATI] Renoir (rev c1) (prog-if 00 [VGA controller]) Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Renoir Flags: bus master, fast devsel, latency 0, IRQ 56 Memory at fcc0000000 (64-bit, prefetchable) [size=256M] Memory at fcd0000000 (64-bit, prefetchable) [size=2M] I/O ports at 1000 [size=256] Memory at d0400000 (32-bit, non-prefetchable) [size=512K] Capabilities: <access denied> Kernel driver in use: amdgpu Kernel modules: amdgpu Thanks, Ray
On 2023/09/06 12:09, Huang Rui wrote: > On Tue, Sep 05, 2023 at 05:20:43PM +0800, Akihiko Odaki wrote: >> On 2023/09/05 18:08, Huang Rui wrote: >>> On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote: >>>> On 2023/08/31 18:32, Huang Rui wrote: >>>>> From: Antonio Caggiano <antonio.caggiano@collabora.com> >>>>> >>>>> Support BLOB resources creation, mapping and unmapping by calling the >>>>> new stable virglrenderer 0.10 interface. Only enabled when available and >>>>> via the blob config. E.g. -device virtio-vga-gl,blob=true >>>>> >>>>> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> >>>>> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> >>>>> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> >>>>> Signed-off-by: Huang Rui <ray.huang@amd.com> >>>>> --- >>>>> >>>>> v1->v2: >>>>> - Remove unused #include "hw/virtio/virtio-iommu.h" >>>>> >>>>> - Add a local function, called virgl_resource_destroy(), that is used >>>>> to release a vgpu resource on error paths and in resource_unref. >>>>> >>>>> - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), >>>>> since this function won't be called on blob resources and also because >>>>> blob resources are unmapped via virgl_cmd_resource_unmap_blob(). >>>>> >>>>> - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths >>>>> and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource >>>>> has been fully initialized. >>>>> >>>>> - Memory region has a different life-cycle from virtio gpu resources >>>>> i.e. cannot be released synchronously along with the vgpu resource. >>>>> So, here the field "region" was changed to a pointer that will be >>>>> released automatically once the memory region is unparented and all >>>>> of its references have been released. >>>>> Also, since the pointer can be used to indicate whether the blob >>>>> is mapped, the explicit field "mapped" was removed. 
>>>>> >>>>> - In virgl_cmd_resource_map_blob(), add check on the value of >>>>> res->region, to prevent beeing called twice on the same resource. >>>>> >>>>> - Remove direct references to parent_obj. >>>>> >>>>> - Separate declarations from code. >>>>> >>>>> hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ >>>>> hw/display/virtio-gpu.c | 4 +- >>>>> include/hw/virtio/virtio-gpu.h | 5 + >>>>> meson.build | 4 + >>>>> 4 files changed, 225 insertions(+), 1 deletion(-) >>>>> >>>>> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c >>>>> index 312953ec16..17b634d4ee 100644 >>>>> --- a/hw/display/virtio-gpu-virgl.c >>>>> +++ b/hw/display/virtio-gpu-virgl.c >>>>> @@ -17,6 +17,7 @@ >>>>> #include "trace.h" >>>>> #include "hw/virtio/virtio.h" >>>>> #include "hw/virtio/virtio-gpu.h" >>>>> +#include "hw/virtio/virtio-gpu-bswap.h" >>>>> >>>>> #include "ui/egl-helpers.h" >>>>> >>>>> @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, >>>>> virgl_renderer_resource_create(&args, NULL, 0); >>>>> } >>>>> >>>>> +static void virgl_resource_destroy(VirtIOGPU *g, >>>>> + struct virtio_gpu_simple_resource *res) >>>>> +{ >>>>> + if (!res) >>>>> + return; >>>>> + >>>>> + QTAILQ_REMOVE(&g->reslist, res, next); >>>>> + >>>>> + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); >>>>> + g_free(res->addrs); >>>>> + >>>>> + g_free(res); >>>>> +} >>>>> + >>>>> static void virgl_cmd_resource_unref(VirtIOGPU *g, >>>>> struct virtio_gpu_ctrl_command *cmd) >>>>> { >>>>> + struct virtio_gpu_simple_resource *res; >>>>> struct virtio_gpu_resource_unref unref; >>>>> struct iovec *res_iovs = NULL; >>>>> int num_iovs = 0; >>>>> @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, >>>>> VIRTIO_GPU_FILL_CMD(unref); >>>>> trace_virtio_gpu_cmd_res_unref(unref.resource_id); >>>>> >>>>> + res = virtio_gpu_find_resource(g, unref.resource_id); >>>>> + >>>>> virgl_renderer_resource_detach_iov(unref.resource_id, >>>>> 
&res_iovs, >>>>> &num_iovs); >>>>> if (res_iovs != NULL && num_iovs != 0) { >>>>> virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); >>>>> + if (res) { >>>>> + res->iov = NULL; >>>>> + res->iov_cnt = 0; >>>>> + } >>>>> } >>>>> + >>>>> virgl_renderer_resource_unref(unref.resource_id); >>>>> + >>>>> + virgl_resource_destroy(g, res); >>>>> } >>>>> >>>>> static void virgl_cmd_context_create(VirtIOGPU *g, >>>>> @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, >>>>> g_free(resp); >>>>> } >>>>> >>>>> +#ifdef HAVE_VIRGL_RESOURCE_BLOB >>>>> + >>>>> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, >>>>> + struct virtio_gpu_ctrl_command *cmd) >>>>> +{ >>>>> + struct virtio_gpu_simple_resource *res; >>>>> + struct virtio_gpu_resource_create_blob cblob; >>>>> + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; >>>>> + int ret; >>>>> + >>>>> + VIRTIO_GPU_FILL_CMD(cblob); >>>>> + virtio_gpu_create_blob_bswap(&cblob); >>>>> + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); >>>>> + >>>>> + if (cblob.resource_id == 0) { >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", >>>>> + __func__); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>> + return; >>>>> + } >>>>> + >>>>> + res = virtio_gpu_find_resource(g, cblob.resource_id); >>>>> + if (res) { >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", >>>>> + __func__, cblob.resource_id); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>> + return; >>>>> + } >>>>> + >>>>> + res = g_new0(struct virtio_gpu_simple_resource, 1); >>>>> + if (!res) { >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; >>>>> + return; >>>>> + } >>>>> + >>>>> + res->resource_id = cblob.resource_id; >>>>> + res->blob_size = cblob.size; >>>>> + >>>>> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { >>>>> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), >>>>> + cmd, &res->addrs, 
&res->iov, >>>>> + &res->iov_cnt); >>>>> + if (!ret) { >>>>> + g_free(res); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; >>>>> + return; >>>>> + } >>>>> + } >>>>> + >>>>> + QTAILQ_INSERT_HEAD(&g->reslist, res, next); >>>>> + >>>>> + virgl_args.res_handle = cblob.resource_id; >>>>> + virgl_args.ctx_id = cblob.hdr.ctx_id; >>>>> + virgl_args.blob_mem = cblob.blob_mem; >>>>> + virgl_args.blob_id = cblob.blob_id; >>>>> + virgl_args.blob_flags = cblob.blob_flags; >>>>> + virgl_args.size = cblob.size; >>>>> + virgl_args.iovecs = res->iov; >>>>> + virgl_args.num_iovs = res->iov_cnt; >>>>> + >>>>> + ret = virgl_renderer_resource_create_blob(&virgl_args); >>>>> + if (ret) { >>>>> + virgl_resource_destroy(g, res); >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", >>>>> + __func__, strerror(-ret)); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; >>>>> + } >>>>> +} >>>>> + >>>>> +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, >>>>> + struct virtio_gpu_ctrl_command *cmd) >>>>> +{ >>>>> + struct virtio_gpu_simple_resource *res; >>>>> + struct virtio_gpu_resource_map_blob mblob; >>>>> + int ret; >>>>> + void *data; >>>>> + uint64_t size; >>>>> + struct virtio_gpu_resp_map_info resp; >>>>> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); >>>>> + >>>>> + VIRTIO_GPU_FILL_CMD(mblob); >>>>> + virtio_gpu_map_blob_bswap(&mblob); >>>>> + >>>>> + if (mblob.resource_id == 0) { >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", >>>>> + __func__); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>> + return; >>>>> + } >>>>> + >>>>> + res = virtio_gpu_find_resource(g, mblob.resource_id); >>>>> + if (!res) { >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", >>>>> + __func__, mblob.resource_id); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>> + return; >>>>> + } >>>>> + if (res->region) { >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", >>>>> + __func__, 
mblob.resource_id); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>> + return; >>>>> + } >>>>> + >>>>> + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); >>>>> + if (ret) { >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", >>>>> + __func__, strerror(-ret)); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>> + return; >>>>> + } >>>>> + >>>>> + res->region = g_new0(MemoryRegion, 1); >>>>> + if (!res->region) { >>>>> + virgl_renderer_resource_unmap(res->resource_id); >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; >>>>> + return; >>>>> + } >>>>> + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); >>>> >>>> I think memory_region_init_ram_ptr() should be used instead. >>> >>> Would you mind to explain the reason? >> >> The documentation comment of memory_region_init_ram_device_ptr() says: >> > A RAM device represents a mapping to a physical device, such as to a >> > PCI MMIO BAR of an vfio-pci assigned device. The memory region may be >> > mapped into the VM address space and access to the region will modify >> > memory directly. However, the memory region should not be included in >> > a memory dump (device may not be enabled/mapped at the time of the >> > dump), and operations incompatible with manipulating MMIO should be >> > avoided. Replaces skip_dump flag. >> >> In my understanding it's not MMIO so memory_region_init_ram_ptr() should >> be used instead. >> > > It actually maybe the video memory (mmio) or system memory here. :-) > > We will get the host memory for blob from host with > virgl_renderer_resource_map() in virglrenderer. In virglrenderer, there are two > types of VIRGL_RESOURCE_FD_DMABUF and VIRGL_RESOURCE_FD_SHM to indicate the > memory types. The shmem is the system memory that won't need GPU > accessible, and dmabuf is the memory that required GPU accessible. 
Host > kernel amdgpu driver will register dma-buf to export the resource buffer > for sharing, and here, it may have video memory that exposed by amdgpu pcie > bar0 in the dma-buf buffers. And we also have system memory(gtt) that can > be mapped as gpu page tables for gpu accessible. > > 07:00.0 VGA compatible controller: Advanced Micro Devices, Inc. [AMD/ATI] Renoir (rev c1) (prog-if 00 [VGA controller]) > Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Renoir > Flags: bus master, fast devsel, latency 0, IRQ 56 > Memory at fcc0000000 (64-bit, prefetchable) [size=256M] > Memory at fcd0000000 (64-bit, prefetchable) [size=2M] > I/O ports at 1000 [size=256] > Memory at d0400000 (32-bit, non-prefetchable) [size=512K] > Capabilities: <access denied> > Kernel driver in use: amdgpu > Kernel modules: amdgpu In my understanding it is not relevant if the memory is backed by device or not. Here MMIO means memory-mapped I/O registers that have side-effects during accesses. Reading such a register may acknowledge an interrupt for example and the unit of writes may also matter. memory_region_init_ram_device_ptr() ensures that no spurious memory read will happen and that word accesses are preserved. They do not matter for video memory even if it lies in a separate device memory. In this sense the name "memory_region_init_ram_device_ptr" is somewhat of a misnomer. Regards, Akihiko Odaki
On Wed, Sep 06, 2023 at 11:39:09AM +0800, Akihiko Odaki wrote: > On 2023/09/06 12:09, Huang Rui wrote: > > On Tue, Sep 05, 2023 at 05:20:43PM +0800, Akihiko Odaki wrote: > >> On 2023/09/05 18:08, Huang Rui wrote: > >>> On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote: > >>>> On 2023/08/31 18:32, Huang Rui wrote: > >>>>> From: Antonio Caggiano <antonio.caggiano@collabora.com> > >>>>> > >>>>> Support BLOB resources creation, mapping and unmapping by calling the > >>>>> new stable virglrenderer 0.10 interface. Only enabled when available and > >>>>> via the blob config. E.g. -device virtio-vga-gl,blob=true > >>>>> > >>>>> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> > >>>>> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> > >>>>> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> > >>>>> Signed-off-by: Huang Rui <ray.huang@amd.com> > >>>>> --- > >>>>> > >>>>> v1->v2: > >>>>> - Remove unused #include "hw/virtio/virtio-iommu.h" > >>>>> > >>>>> - Add a local function, called virgl_resource_destroy(), that is used > >>>>> to release a vgpu resource on error paths and in resource_unref. > >>>>> > >>>>> - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), > >>>>> since this function won't be called on blob resources and also because > >>>>> blob resources are unmapped via virgl_cmd_resource_unmap_blob(). > >>>>> > >>>>> - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths > >>>>> and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource > >>>>> has been fully initialized. > >>>>> > >>>>> - Memory region has a different life-cycle from virtio gpu resources > >>>>> i.e. cannot be released synchronously along with the vgpu resource. > >>>>> So, here the field "region" was changed to a pointer that will be > >>>>> released automatically once the memory region is unparented and all > >>>>> of its references have been released. 
> >>>>> Also, since the pointer can be used to indicate whether the blob > >>>>> is mapped, the explicit field "mapped" was removed. > >>>>> > >>>>> - In virgl_cmd_resource_map_blob(), add check on the value of > >>>>> res->region, to prevent beeing called twice on the same resource. > >>>>> > >>>>> - Remove direct references to parent_obj. > >>>>> > >>>>> - Separate declarations from code. > >>>>> > >>>>> hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ > >>>>> hw/display/virtio-gpu.c | 4 +- > >>>>> include/hw/virtio/virtio-gpu.h | 5 + > >>>>> meson.build | 4 + > >>>>> 4 files changed, 225 insertions(+), 1 deletion(-) > >>>>> > >>>>> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c > >>>>> index 312953ec16..17b634d4ee 100644 > >>>>> --- a/hw/display/virtio-gpu-virgl.c > >>>>> +++ b/hw/display/virtio-gpu-virgl.c > >>>>> @@ -17,6 +17,7 @@ > >>>>> #include "trace.h" > >>>>> #include "hw/virtio/virtio.h" > >>>>> #include "hw/virtio/virtio-gpu.h" > >>>>> +#include "hw/virtio/virtio-gpu-bswap.h" > >>>>> > >>>>> #include "ui/egl-helpers.h" > >>>>> > >>>>> @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, > >>>>> virgl_renderer_resource_create(&args, NULL, 0); > >>>>> } > >>>>> > >>>>> +static void virgl_resource_destroy(VirtIOGPU *g, > >>>>> + struct virtio_gpu_simple_resource *res) > >>>>> +{ > >>>>> + if (!res) > >>>>> + return; > >>>>> + > >>>>> + QTAILQ_REMOVE(&g->reslist, res, next); > >>>>> + > >>>>> + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); > >>>>> + g_free(res->addrs); > >>>>> + > >>>>> + g_free(res); > >>>>> +} > >>>>> + > >>>>> static void virgl_cmd_resource_unref(VirtIOGPU *g, > >>>>> struct virtio_gpu_ctrl_command *cmd) > >>>>> { > >>>>> + struct virtio_gpu_simple_resource *res; > >>>>> struct virtio_gpu_resource_unref unref; > >>>>> struct iovec *res_iovs = NULL; > >>>>> int num_iovs = 0; > >>>>> @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, > 
>>>>> VIRTIO_GPU_FILL_CMD(unref); > >>>>> trace_virtio_gpu_cmd_res_unref(unref.resource_id); > >>>>> > >>>>> + res = virtio_gpu_find_resource(g, unref.resource_id); > >>>>> + > >>>>> virgl_renderer_resource_detach_iov(unref.resource_id, > >>>>> &res_iovs, > >>>>> &num_iovs); > >>>>> if (res_iovs != NULL && num_iovs != 0) { > >>>>> virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); > >>>>> + if (res) { > >>>>> + res->iov = NULL; > >>>>> + res->iov_cnt = 0; > >>>>> + } > >>>>> } > >>>>> + > >>>>> virgl_renderer_resource_unref(unref.resource_id); > >>>>> + > >>>>> + virgl_resource_destroy(g, res); > >>>>> } > >>>>> > >>>>> static void virgl_cmd_context_create(VirtIOGPU *g, > >>>>> @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, > >>>>> g_free(resp); > >>>>> } > >>>>> > >>>>> +#ifdef HAVE_VIRGL_RESOURCE_BLOB > >>>>> + > >>>>> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, > >>>>> + struct virtio_gpu_ctrl_command *cmd) > >>>>> +{ > >>>>> + struct virtio_gpu_simple_resource *res; > >>>>> + struct virtio_gpu_resource_create_blob cblob; > >>>>> + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; > >>>>> + int ret; > >>>>> + > >>>>> + VIRTIO_GPU_FILL_CMD(cblob); > >>>>> + virtio_gpu_create_blob_bswap(&cblob); > >>>>> + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); > >>>>> + > >>>>> + if (cblob.resource_id == 0) { > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > >>>>> + __func__); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> + res = virtio_gpu_find_resource(g, cblob.resource_id); > >>>>> + if (res) { > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", > >>>>> + __func__, cblob.resource_id); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> + res = g_new0(struct virtio_gpu_simple_resource, 1); > >>>>> + if (!res) { > 
>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> + res->resource_id = cblob.resource_id; > >>>>> + res->blob_size = cblob.size; > >>>>> + > >>>>> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { > >>>>> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), > >>>>> + cmd, &res->addrs, &res->iov, > >>>>> + &res->iov_cnt); > >>>>> + if (!ret) { > >>>>> + g_free(res); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > >>>>> + return; > >>>>> + } > >>>>> + } > >>>>> + > >>>>> + QTAILQ_INSERT_HEAD(&g->reslist, res, next); > >>>>> + > >>>>> + virgl_args.res_handle = cblob.resource_id; > >>>>> + virgl_args.ctx_id = cblob.hdr.ctx_id; > >>>>> + virgl_args.blob_mem = cblob.blob_mem; > >>>>> + virgl_args.blob_id = cblob.blob_id; > >>>>> + virgl_args.blob_flags = cblob.blob_flags; > >>>>> + virgl_args.size = cblob.size; > >>>>> + virgl_args.iovecs = res->iov; > >>>>> + virgl_args.num_iovs = res->iov_cnt; > >>>>> + > >>>>> + ret = virgl_renderer_resource_create_blob(&virgl_args); > >>>>> + if (ret) { > >>>>> + virgl_resource_destroy(g, res); > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", > >>>>> + __func__, strerror(-ret)); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; > >>>>> + } > >>>>> +} > >>>>> + > >>>>> +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, > >>>>> + struct virtio_gpu_ctrl_command *cmd) > >>>>> +{ > >>>>> + struct virtio_gpu_simple_resource *res; > >>>>> + struct virtio_gpu_resource_map_blob mblob; > >>>>> + int ret; > >>>>> + void *data; > >>>>> + uint64_t size; > >>>>> + struct virtio_gpu_resp_map_info resp; > >>>>> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); > >>>>> + > >>>>> + VIRTIO_GPU_FILL_CMD(mblob); > >>>>> + virtio_gpu_map_blob_bswap(&mblob); > >>>>> + > >>>>> + if (mblob.resource_id == 0) { > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", > >>>>> + __func__); > >>>>> + cmd->error = 
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> + res = virtio_gpu_find_resource(g, mblob.resource_id); > >>>>> + if (!res) { > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", > >>>>> + __func__, mblob.resource_id); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>>>> + return; > >>>>> + } > >>>>> + if (res->region) { > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", > >>>>> + __func__, mblob.resource_id); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); > >>>>> + if (ret) { > >>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", > >>>>> + __func__, strerror(-ret)); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; > >>>>> + return; > >>>>> + } > >>>>> + > >>>>> + res->region = g_new0(MemoryRegion, 1); > >>>>> + if (!res->region) { > >>>>> + virgl_renderer_resource_unmap(res->resource_id); > >>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; > >>>>> + return; > >>>>> + } > >>>>> + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); > >>>> > >>>> I think memory_region_init_ram_ptr() should be used instead. > >>> > >>> Would you mind to explain the reason? > >> > >> The documentation comment of memory_region_init_ram_device_ptr() says: > >> > A RAM device represents a mapping to a physical device, such as to a > >> > PCI MMIO BAR of an vfio-pci assigned device. The memory region may be > >> > mapped into the VM address space and access to the region will modify > >> > memory directly. However, the memory region should not be included in > >> > a memory dump (device may not be enabled/mapped at the time of the > >> > dump), and operations incompatible with manipulating MMIO should be > >> > avoided. Replaces skip_dump flag. 
> >> > >> In my understanding it's not MMIO so memory_region_init_ram_ptr() should > >> be used instead. > >> > > > > It actually maybe the video memory (mmio) or system memory here. :-) > > > > We will get the host memory for blob from host with > > virgl_renderer_resource_map() in virglrenderer. In virglrenderer, there are two > > types of VIRGL_RESOURCE_FD_DMABUF and VIRGL_RESOURCE_FD_SHM to indicate the > > memory types. The shmem is the system memory that won't need GPU > > accessible, and dmabuf is the memory that required GPU accessible. Host > > kernel amdgpu driver will register dma-buf to export the resource buffer > > for sharing, and here, it may have video memory that exposed by amdgpu pcie > > bar0 in the dma-buf buffers. And we also have system memory(gtt) that can > > be mapped as gpu page tables for gpu accessible. > > > > 07:00.0 VGA compatible controller: Advanced Micro Devices, Inc. [AMD/ATI] Renoir (rev c1) (prog-if 00 [VGA controller]) > > Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Renoir > > Flags: bus master, fast devsel, latency 0, IRQ 56 > > Memory at fcc0000000 (64-bit, prefetchable) [size=256M] > > Memory at fcd0000000 (64-bit, prefetchable) [size=2M] > > I/O ports at 1000 [size=256] > > Memory at d0400000 (32-bit, non-prefetchable) [size=512K] > > Capabilities: <access denied> > > Kernel driver in use: amdgpu > > Kernel modules: amdgpu > > In my understanding it is not relevant if the memory is backed by device > or not. Here MMIO means memory-mapping I/O registers that has > side-effects during accesses. Reading such a register may acknowledge an > interrupt for example and the unit of writes may also matter. > memory_region_init_ram_device_ptr() ensures no supurious memory read > will not happen and word accesses are preserved. > > They do not matter for video memory even if it lies in a separate device > memory. In this sense the name "memory_region_init_ram_device_ptr" is > somewhat misnomer. > OK. 
Thanks for the clarification. After tracing the code, I found that if memory_region_init_ram_device_ptr() is used, it will use memory_region_dispatch_write() to call into memory_region_ram_device_write(), which is registered as ram_device_mem_ops, to write ram_block->host at the given offset dword by dword. And if we use memory_region_init_ram_ptr(), the flatview_write_continue() will use memmove() to write buf into the ramblock while we write the blob memory. May I know whether you mean the "memmove" may have an interrupt or spurious memory access? } else if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid potential bugs */ val = ldn_he_p(buf, l); result |= memory_region_dispatch_write(mr, addr1, val, size_memop(l), attrs); } else { /* RAM case */ ram_ptr = qemu_ram_ptr_length(mr->ram_block, addr1, &l, false); memmove(ram_ptr, buf, l); invalidate_and_set_dirty(mr, addr1, l); } Thanks, Ray
On 2023/09/06 16:56, Huang Rui wrote: > On Wed, Sep 06, 2023 at 11:39:09AM +0800, Akihiko Odaki wrote: >> On 2023/09/06 12:09, Huang Rui wrote: >>> On Tue, Sep 05, 2023 at 05:20:43PM +0800, Akihiko Odaki wrote: >>>> On 2023/09/05 18:08, Huang Rui wrote: >>>>> On Thu, Aug 31, 2023 at 06:24:32PM +0800, Akihiko Odaki wrote: >>>>>> On 2023/08/31 18:32, Huang Rui wrote: >>>>>>> From: Antonio Caggiano <antonio.caggiano@collabora.com> >>>>>>> >>>>>>> Support BLOB resources creation, mapping and unmapping by calling the >>>>>>> new stable virglrenderer 0.10 interface. Only enabled when available and >>>>>>> via the blob config. E.g. -device virtio-vga-gl,blob=true >>>>>>> >>>>>>> Signed-off-by: Antonio Caggiano <antonio.caggiano@collabora.com> >>>>>>> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com> >>>>>>> Signed-off-by: Xenia Ragiadakou <xenia.ragiadakou@amd.com> >>>>>>> Signed-off-by: Huang Rui <ray.huang@amd.com> >>>>>>> --- >>>>>>> >>>>>>> v1->v2: >>>>>>> - Remove unused #include "hw/virtio/virtio-iommu.h" >>>>>>> >>>>>>> - Add a local function, called virgl_resource_destroy(), that is used >>>>>>> to release a vgpu resource on error paths and in resource_unref. >>>>>>> >>>>>>> - Remove virtio_gpu_virgl_resource_unmap from virtio_gpu_cleanup_mapping(), >>>>>>> since this function won't be called on blob resources and also because >>>>>>> blob resources are unmapped via virgl_cmd_resource_unmap_blob(). >>>>>>> >>>>>>> - In virgl_cmd_resource_create_blob(), do proper cleanup in error paths >>>>>>> and move QTAILQ_INSERT_HEAD(&g->reslist, res, next) after the resource >>>>>>> has been fully initialized. >>>>>>> >>>>>>> - Memory region has a different life-cycle from virtio gpu resources >>>>>>> i.e. cannot be released synchronously along with the vgpu resource. >>>>>>> So, here the field "region" was changed to a pointer that will be >>>>>>> released automatically once the memory region is unparented and all >>>>>>> of its references have been released. 
>>>>>>> Also, since the pointer can be used to indicate whether the blob >>>>>>> is mapped, the explicit field "mapped" was removed. >>>>>>> >>>>>>> - In virgl_cmd_resource_map_blob(), add check on the value of >>>>>>> res->region, to prevent beeing called twice on the same resource. >>>>>>> >>>>>>> - Remove direct references to parent_obj. >>>>>>> >>>>>>> - Separate declarations from code. >>>>>>> >>>>>>> hw/display/virtio-gpu-virgl.c | 213 +++++++++++++++++++++++++++++++++ >>>>>>> hw/display/virtio-gpu.c | 4 +- >>>>>>> include/hw/virtio/virtio-gpu.h | 5 + >>>>>>> meson.build | 4 + >>>>>>> 4 files changed, 225 insertions(+), 1 deletion(-) >>>>>>> >>>>>>> diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c >>>>>>> index 312953ec16..17b634d4ee 100644 >>>>>>> --- a/hw/display/virtio-gpu-virgl.c >>>>>>> +++ b/hw/display/virtio-gpu-virgl.c >>>>>>> @@ -17,6 +17,7 @@ >>>>>>> #include "trace.h" >>>>>>> #include "hw/virtio/virtio.h" >>>>>>> #include "hw/virtio/virtio-gpu.h" >>>>>>> +#include "hw/virtio/virtio-gpu-bswap.h" >>>>>>> >>>>>>> #include "ui/egl-helpers.h" >>>>>>> >>>>>>> @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, >>>>>>> virgl_renderer_resource_create(&args, NULL, 0); >>>>>>> } >>>>>>> >>>>>>> +static void virgl_resource_destroy(VirtIOGPU *g, >>>>>>> + struct virtio_gpu_simple_resource *res) >>>>>>> +{ >>>>>>> + if (!res) >>>>>>> + return; >>>>>>> + >>>>>>> + QTAILQ_REMOVE(&g->reslist, res, next); >>>>>>> + >>>>>>> + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); >>>>>>> + g_free(res->addrs); >>>>>>> + >>>>>>> + g_free(res); >>>>>>> +} >>>>>>> + >>>>>>> static void virgl_cmd_resource_unref(VirtIOGPU *g, >>>>>>> struct virtio_gpu_ctrl_command *cmd) >>>>>>> { >>>>>>> + struct virtio_gpu_simple_resource *res; >>>>>>> struct virtio_gpu_resource_unref unref; >>>>>>> struct iovec *res_iovs = NULL; >>>>>>> int num_iovs = 0; >>>>>>> @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, 
>>>>>>> VIRTIO_GPU_FILL_CMD(unref); >>>>>>> trace_virtio_gpu_cmd_res_unref(unref.resource_id); >>>>>>> >>>>>>> + res = virtio_gpu_find_resource(g, unref.resource_id); >>>>>>> + >>>>>>> virgl_renderer_resource_detach_iov(unref.resource_id, >>>>>>> &res_iovs, >>>>>>> &num_iovs); >>>>>>> if (res_iovs != NULL && num_iovs != 0) { >>>>>>> virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); >>>>>>> + if (res) { >>>>>>> + res->iov = NULL; >>>>>>> + res->iov_cnt = 0; >>>>>>> + } >>>>>>> } >>>>>>> + >>>>>>> virgl_renderer_resource_unref(unref.resource_id); >>>>>>> + >>>>>>> + virgl_resource_destroy(g, res); >>>>>>> } >>>>>>> >>>>>>> static void virgl_cmd_context_create(VirtIOGPU *g, >>>>>>> @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, >>>>>>> g_free(resp); >>>>>>> } >>>>>>> >>>>>>> +#ifdef HAVE_VIRGL_RESOURCE_BLOB >>>>>>> + >>>>>>> +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, >>>>>>> + struct virtio_gpu_ctrl_command *cmd) >>>>>>> +{ >>>>>>> + struct virtio_gpu_simple_resource *res; >>>>>>> + struct virtio_gpu_resource_create_blob cblob; >>>>>>> + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; >>>>>>> + int ret; >>>>>>> + >>>>>>> + VIRTIO_GPU_FILL_CMD(cblob); >>>>>>> + virtio_gpu_create_blob_bswap(&cblob); >>>>>>> + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); >>>>>>> + >>>>>>> + if (cblob.resource_id == 0) { >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", >>>>>>> + __func__); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> + res = virtio_gpu_find_resource(g, cblob.resource_id); >>>>>>> + if (res) { >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", >>>>>>> + __func__, cblob.resource_id); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> + res = g_new0(struct virtio_gpu_simple_resource, 1); >>>>>>> + if (!res) { 
>>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> + res->resource_id = cblob.resource_id; >>>>>>> + res->blob_size = cblob.size; >>>>>>> + >>>>>>> + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { >>>>>>> + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), >>>>>>> + cmd, &res->addrs, &res->iov, >>>>>>> + &res->iov_cnt); >>>>>>> + if (!ret) { >>>>>>> + g_free(res); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; >>>>>>> + return; >>>>>>> + } >>>>>>> + } >>>>>>> + >>>>>>> + QTAILQ_INSERT_HEAD(&g->reslist, res, next); >>>>>>> + >>>>>>> + virgl_args.res_handle = cblob.resource_id; >>>>>>> + virgl_args.ctx_id = cblob.hdr.ctx_id; >>>>>>> + virgl_args.blob_mem = cblob.blob_mem; >>>>>>> + virgl_args.blob_id = cblob.blob_id; >>>>>>> + virgl_args.blob_flags = cblob.blob_flags; >>>>>>> + virgl_args.size = cblob.size; >>>>>>> + virgl_args.iovecs = res->iov; >>>>>>> + virgl_args.num_iovs = res->iov_cnt; >>>>>>> + >>>>>>> + ret = virgl_renderer_resource_create_blob(&virgl_args); >>>>>>> + if (ret) { >>>>>>> + virgl_resource_destroy(g, res); >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", >>>>>>> + __func__, strerror(-ret)); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; >>>>>>> + } >>>>>>> +} >>>>>>> + >>>>>>> +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, >>>>>>> + struct virtio_gpu_ctrl_command *cmd) >>>>>>> +{ >>>>>>> + struct virtio_gpu_simple_resource *res; >>>>>>> + struct virtio_gpu_resource_map_blob mblob; >>>>>>> + int ret; >>>>>>> + void *data; >>>>>>> + uint64_t size; >>>>>>> + struct virtio_gpu_resp_map_info resp; >>>>>>> + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); >>>>>>> + >>>>>>> + VIRTIO_GPU_FILL_CMD(mblob); >>>>>>> + virtio_gpu_map_blob_bswap(&mblob); >>>>>>> + >>>>>>> + if (mblob.resource_id == 0) { >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", >>>>>>> + __func__); >>>>>>> + cmd->error = 
VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> + res = virtio_gpu_find_resource(g, mblob.resource_id); >>>>>>> + if (!res) { >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", >>>>>>> + __func__, mblob.resource_id); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>>>> + return; >>>>>>> + } >>>>>>> + if (res->region) { >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", >>>>>>> + __func__, mblob.resource_id); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); >>>>>>> + if (ret) { >>>>>>> + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", >>>>>>> + __func__, strerror(-ret)); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; >>>>>>> + return; >>>>>>> + } >>>>>>> + >>>>>>> + res->region = g_new0(MemoryRegion, 1); >>>>>>> + if (!res->region) { >>>>>>> + virgl_renderer_resource_unmap(res->resource_id); >>>>>>> + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; >>>>>>> + return; >>>>>>> + } >>>>>>> + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); >>>>>> >>>>>> I think memory_region_init_ram_ptr() should be used instead. >>>>> >>>>> Would you mind to explain the reason? >>>> >>>> The documentation comment of memory_region_init_ram_device_ptr() says: >>>> > A RAM device represents a mapping to a physical device, such as to a >>>> > PCI MMIO BAR of an vfio-pci assigned device. The memory region may be >>>> > mapped into the VM address space and access to the region will modify >>>> > memory directly. However, the memory region should not be included in >>>> > a memory dump (device may not be enabled/mapped at the time of the >>>> > dump), and operations incompatible with manipulating MMIO should be >>>> > avoided. Replaces skip_dump flag. 
>>>> >>>> In my understanding it's not MMIO so memory_region_init_ram_ptr() should >>>> be used instead. >>>> >>> >>> It actually may be the video memory (mmio) or system memory here. :-) >>> >>> We will get the host memory for blob from host with >>> virgl_renderer_resource_map() in virglrenderer. In virglrenderer, there are two >>> types, VIRGL_RESOURCE_FD_DMABUF and VIRGL_RESOURCE_FD_SHM, to indicate the >>> memory types. The shmem is the system memory that doesn't need to be GPU >>> accessible, and dmabuf is the memory that needs to be GPU accessible. The host >>> kernel amdgpu driver will register dma-buf to export the resource buffer >>> for sharing, and here, it may have video memory that is exposed by the amdgpu pcie >>> bar0 in the dma-buf buffers. And we also have system memory (gtt) that can >>> be mapped into gpu page tables to be gpu accessible. >>> >>> 07:00.0 VGA compatible controller: Advanced Micro Devices, Inc. [AMD/ATI] Renoir (rev c1) (prog-if 00 [VGA controller]) >>> Subsystem: Advanced Micro Devices, Inc. [AMD/ATI] Renoir >>> Flags: bus master, fast devsel, latency 0, IRQ 56 >>> Memory at fcc0000000 (64-bit, prefetchable) [size=256M] >>> Memory at fcd0000000 (64-bit, prefetchable) [size=2M] >>> I/O ports at 1000 [size=256] >>> Memory at d0400000 (32-bit, non-prefetchable) [size=512K] >>> Capabilities: <access denied> >>> Kernel driver in use: amdgpu >>> Kernel modules: amdgpu >> >> In my understanding it is not relevant whether the memory is backed by a device >> or not. Here MMIO means memory-mapped I/O registers that have >> side-effects during accesses. Reading such a register may acknowledge an >> interrupt, for example, and the unit of writes may also matter. >> memory_region_init_ram_device_ptr() ensures that no spurious memory read >> will happen and that word-sized accesses are preserved. >> >> They do not matter for video memory even if it lies in separate device >> memory. In this sense the name "memory_region_init_ram_device_ptr" is >> somewhat of a misnomer. >> > > OK.
Thanks for the clarification. > > After tracing the code: if memory_region_init_ram_device_ptr() is used, it will use > memory_region_dispatch_write() to call into memory_region_ram_device_write(), > which is registered as ram_device_mem_ops, to write ram_block->host at the offset > dword by dword. And if we use memory_region_init_ram_ptr(), > flatview_write_continue() will use memmove() to write buf into the ramblock while > we write the blob memory. May I know whether you mean the "memmove" may have an > interrupt or spurious memory access? memmove can split one word write into smaller writes, and that is implementation dependent. git blame is your friend if you want to know more. Particularly commits 21e00fa55f ("memory: Replace skip_dump flag with "ram_device"") and 4a2e242bbb ("memory: Don't use memcpy for ram_device regions") [the memmove call used to be memcpy] may interest you. Regards, Akihiko Odaki
diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index 312953ec16..17b634d4ee 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -17,6 +17,7 @@ #include "trace.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-bswap.h" #include "ui/egl-helpers.h" @@ -78,9 +79,24 @@ static void virgl_cmd_create_resource_3d(VirtIOGPU *g, virgl_renderer_resource_create(&args, NULL, 0); } +static void virgl_resource_destroy(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res) +{ + if (!res) + return; + + QTAILQ_REMOVE(&g->reslist, res, next); + + virtio_gpu_cleanup_mapping_iov(g, res->iov, res->iov_cnt); + g_free(res->addrs); + + g_free(res); +} + static void virgl_cmd_resource_unref(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { + struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_unref unref; struct iovec *res_iovs = NULL; int num_iovs = 0; @@ -88,13 +104,22 @@ static void virgl_cmd_resource_unref(VirtIOGPU *g, VIRTIO_GPU_FILL_CMD(unref); trace_virtio_gpu_cmd_res_unref(unref.resource_id); + res = virtio_gpu_find_resource(g, unref.resource_id); + virgl_renderer_resource_detach_iov(unref.resource_id, &res_iovs, &num_iovs); if (res_iovs != NULL && num_iovs != 0) { virtio_gpu_cleanup_mapping_iov(g, res_iovs, num_iovs); + if (res) { + res->iov = NULL; + res->iov_cnt = 0; + } } + virgl_renderer_resource_unref(unref.resource_id); + + virgl_resource_destroy(g, res); } static void virgl_cmd_context_create(VirtIOGPU *g, @@ -426,6 +451,183 @@ static void virgl_cmd_get_capset(VirtIOGPU *g, g_free(resp); } +#ifdef HAVE_VIRGL_RESOURCE_BLOB + +static void virgl_cmd_resource_create_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_create_blob cblob; + struct virgl_renderer_resource_create_blob_args virgl_args = { 0 }; + int ret; + + VIRTIO_GPU_FILL_CMD(cblob); + 
virtio_gpu_create_blob_bswap(&cblob); + trace_virtio_gpu_cmd_res_create_blob(cblob.resource_id, cblob.size); + + if (cblob.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_find_resource(g, cblob.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, cblob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_simple_resource, 1); + if (!res) { + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; + return; + } + + res->resource_id = cblob.resource_id; + res->blob_size = cblob.size; + + if (cblob.blob_mem != VIRTIO_GPU_BLOB_MEM_HOST3D) { + ret = virtio_gpu_create_mapping_iov(g, cblob.nr_entries, sizeof(cblob), + cmd, &res->addrs, &res->iov, + &res->iov_cnt); + if (!ret) { + g_free(res); + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + } + + QTAILQ_INSERT_HEAD(&g->reslist, res, next); + + virgl_args.res_handle = cblob.resource_id; + virgl_args.ctx_id = cblob.hdr.ctx_id; + virgl_args.blob_mem = cblob.blob_mem; + virgl_args.blob_id = cblob.blob_id; + virgl_args.blob_flags = cblob.blob_flags; + virgl_args.size = cblob.size; + virgl_args.iovecs = res->iov; + virgl_args.num_iovs = res->iov_cnt; + + ret = virgl_renderer_resource_create_blob(&virgl_args); + if (ret) { + virgl_resource_destroy(g, res); + qemu_log_mask(LOG_GUEST_ERROR, "%s: virgl blob create error: %s\n", + __func__, strerror(-ret)); + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + } +} + +static void virgl_cmd_resource_map_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_map_blob mblob; + int ret; + void *data; + uint64_t size; + struct virtio_gpu_resp_map_info resp; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + + VIRTIO_GPU_FILL_CMD(mblob); + 
virtio_gpu_map_blob_bswap(&mblob); + + if (mblob.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_find_resource(g, mblob.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", + __func__, mblob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + if (res->region) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already mapped %d\n", + __func__, mblob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + ret = virgl_renderer_resource_map(res->resource_id, &data, &size); + if (ret) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource map error: %s\n", + __func__, strerror(-ret)); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res->region = g_new0(MemoryRegion, 1); + if (!res->region) { + virgl_renderer_resource_unmap(res->resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; + return; + } + memory_region_init_ram_device_ptr(res->region, OBJECT(g), NULL, size, data); + OBJECT(res->region)->free = g_free; + memory_region_add_subregion(&b->hostmem, mblob.offset, res->region); + memory_region_set_enabled(res->region, true); + + memset(&resp, 0, sizeof(resp)); + resp.hdr.type = VIRTIO_GPU_RESP_OK_MAP_INFO; + virgl_renderer_resource_get_map_info(mblob.resource_id, &resp.map_info); + virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); +} + +static int virtio_gpu_virgl_resource_unmap(VirtIOGPU *g, + struct virtio_gpu_simple_resource + *res) +{ + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); + + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already unmapped %d\n", + __func__, res->resource_id); + return VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + } + + memory_region_set_enabled(res->region, false); + memory_region_del_subregion(&b->hostmem, res->region); + 
object_unparent(OBJECT(res->region)); + res->region = NULL; + + return virgl_renderer_resource_unmap(res->resource_id); +} + +static void virgl_cmd_resource_unmap_blob(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_unmap_blob ublob; + VIRTIO_GPU_FILL_CMD(ublob); + virtio_gpu_unmap_blob_bswap(&ublob); + + if (ublob.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_find_resource(g, ublob.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource does not exist %d\n", + __func__, ublob.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + virtio_gpu_virgl_resource_unmap(g, res); +} + +#endif /* HAVE_VIRGL_RESOURCE_BLOB */ + void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { @@ -492,6 +694,17 @@ void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, case VIRTIO_GPU_CMD_GET_EDID: virtio_gpu_get_edid(g, cmd); break; +#ifdef HAVE_VIRGL_RESOURCE_BLOB + case VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB: + virgl_cmd_resource_create_blob(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB: + virgl_cmd_resource_map_blob(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB: + virgl_cmd_resource_unmap_blob(g, cmd); + break; +#endif /* HAVE_VIRGL_RESOURCE_BLOB */ default: cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; break; diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 5b7a7eab4f..cc4c1f81bb 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -1367,10 +1367,12 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) return; } +#ifndef HAVE_VIRGL_RESOURCE_BLOB if (virtio_gpu_virgl_enabled(g->parent_obj.conf)) { - error_setg(errp, "blobs and virgl are not compatible (yet)"); + error_setg(errp, "Linked virglrenderer does not support 
blob resources"); return; } +#endif } if (!virtio_gpu_base_device_realize(qdev, diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index 55973e112f..b9adc28071 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -58,6 +58,11 @@ struct virtio_gpu_simple_resource { int dmabuf_fd; uint8_t *remapped; +#ifdef HAVE_VIRGL_RESOURCE_BLOB + /* only blob resource needs this region to be mapped as guest mmio */ + MemoryRegion *region; +#endif + QTAILQ_ENTRY(virtio_gpu_simple_resource) next; }; diff --git a/meson.build b/meson.build index ff20d3c249..f7b744ab82 100644 --- a/meson.build +++ b/meson.build @@ -1072,6 +1072,10 @@ if not get_option('virglrenderer').auto() or have_system or have_vhost_user_gpu cc.has_function('virgl_renderer_context_create_with_flags', prefix: '#include <virglrenderer.h>', dependencies: virgl)) + config_host_data.set('HAVE_VIRGL_RESOURCE_BLOB', + cc.has_function('virgl_renderer_resource_create_blob', + prefix: '#include <virglrenderer.h>', + dependencies: virgl)) endif blkio = not_found if not get_option('blkio').auto() or have_block