@@ -211,6 +211,9 @@ virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
if (virtio_gpu_blob_enabled(g->conf)) {
features |= (1 << VIRTIO_GPU_F_RESOURCE_BLOB);
}
+ if (virtio_gpu_relfence_enabled(g->conf)) {
+ features |= (1 << VIRTIO_GPU_F_RELEASE_FENCE);
+ }
return features;
}
@@ -157,7 +157,7 @@ void virtio_gpu_fini_udmabuf(struct virtio_gpu_simple_resource *res)
}
}
-static void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
+void virtio_gpu_free_dmabuf(VirtIOGPU *g, VGPUDMABuf *dmabuf)
{
struct virtio_gpu_scanout *scanout;
@@ -216,7 +216,7 @@ int virtio_gpu_update_dmabuf(VirtIOGPU *g,
new_primary->buf.height);
dpy_gl_scanout_dmabuf(scanout->con, &new_primary->buf);
- if (old_primary) {
+ if (old_primary && !virtio_gpu_relfence_enabled(g->parent_obj.conf)) {
virtio_gpu_free_dmabuf(g, old_primary);
}
@@ -771,6 +771,11 @@ static void virtio_gpu_set_scanout_blob(VirtIOGPU *g,
virtio_gpu_do_set_scanout(g, ss.scanout_id,
&fb, res, &ss.r, &cmd->error);
+
+ if (virtio_gpu_relfence_enabled(g->parent_obj.conf)) {
+ cmd->finished = true;
+ cmd->error = VIRTIO_GPU_RESP_OK_NODATA;
+ }
}
int virtio_gpu_create_mapping_iov(VirtIOGPU *g,
@@ -1038,12 +1043,43 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
fprintf(stderr, "inflight: %3d (+)\r", g->inflight);
}
} else {
- g_free(cmd);
+ if (cmd->cmd_hdr.type == VIRTIO_GPU_CMD_SET_SCANOUT_BLOB &&
+ cmd->error == VIRTIO_GPU_RESP_OK_NODATA) {
+ QTAILQ_INSERT_TAIL(&g->rel_fenceq, cmd, next);
+ } else {
+ g_free(cmd);
+ }
}
}
g->processing_cmdq = false;
}
+/*
+ * Walk the cached dmabuf list and, for each entry whose fence_fd is
+ * negative (presumably meaning its release fence has signaled or was
+ * never attached -- TODO confirm the fence_fd lifecycle), complete any
+ * SET_SCANOUT_BLOB commands deferred on g->rel_fenceq that target the
+ * same dmabuf fd, then free the dmabuf itself.
+ */
+static void virtio_gpu_process_rel_fenceq(VirtIOGPU *g)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_set_scanout_blob ss;
+ struct virtio_gpu_ctrl_command *cmd, *tmp;
+ VGPUDMABuf *dmabuf, *temp;
+
+ /* _SAFE iteration: virtio_gpu_free_dmabuf() below unlinks entries. */
+ QTAILQ_FOREACH_SAFE(dmabuf, &g->dmabuf.bufs, next, temp) {
+ if (dmabuf->buf.fence_fd < 0) {
+ QTAILQ_FOREACH_SAFE(cmd, &g->rel_fenceq, next, tmp) {
+ /*
+ * Re-read the scanout-blob payload from the guest buffer to
+ * recover the resource id this deferred command refers to.
+ * NOTE(review): VIRTIO_GPU_FILL_CMD can early-return on a
+ * short payload, abandoning the rest of both queues mid-walk
+ * -- verify that is acceptable here.
+ */
+ VIRTIO_GPU_FILL_CMD(ss);
+ virtio_gpu_bswap_32(&ss, sizeof(ss));
+ res = virtio_gpu_find_check_resource(g, ss.resource_id, true,
+ __func__, &cmd->error);
+ /* Match deferred command to this dmabuf via the resource fd. */
+ if (res && dmabuf->buf.fd == res->dmabuf_fd) {
+ /* Release fence is gone: ack the deferred command now. */
+ virtio_gpu_ctrl_response_nodata(g, cmd,
+ VIRTIO_GPU_RESP_OK_NODATA);
+ QTAILQ_REMOVE(&g->rel_fenceq, cmd, next);
+ g_free(cmd);
+ }
+ }
+ /*
+ * NOTE(review): this frees every fence-less dmabuf, not just a
+ * replaced old primary -- confirm the currently-scanned-out
+ * buffer can never appear here with fence_fd < 0.
+ */
+ virtio_gpu_free_dmabuf(g, dmabuf);
+ }
+ }
+}
+
static void virtio_gpu_process_fenceq(VirtIOGPU *g)
{
struct virtio_gpu_ctrl_command *cmd, *tmp;
@@ -1064,6 +1100,12 @@ static void virtio_gpu_handle_gl_flushed(VirtIOGPUBase *b)
{
VirtIOGPU *g = container_of(b, VirtIOGPU, parent_obj);
+ if (virtio_gpu_relfence_enabled(g->parent_obj.conf)) {
+ virtio_gpu_process_rel_fenceq(g);
+ }
+ if (g->parent_obj.renderer_blocked) {
+ return;
+ }
virtio_gpu_process_fenceq(g);
virtio_gpu_process_cmdq(g);
}
@@ -1323,6 +1365,13 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
}
}
+ if (virtio_gpu_relfence_enabled(g->parent_obj.conf)) {
+ if (!virtio_gpu_blob_enabled(g->parent_obj.conf)) {
+ error_setg(errp, "cannot enable relfence without blob resources");
+ return;
+ }
+ }
+
if (!virtio_gpu_base_device_realize(qdev,
virtio_gpu_handle_ctrl_cb,
virtio_gpu_handle_cursor_cb,
@@ -1337,6 +1386,7 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
QTAILQ_INIT(&g->reslist);
QTAILQ_INIT(&g->cmdq);
QTAILQ_INIT(&g->fenceq);
+ QTAILQ_INIT(&g->rel_fenceq);
}
void virtio_gpu_reset(VirtIODevice *vdev)
@@ -1418,6 +1468,8 @@ static Property virtio_gpu_properties[] = {
256 * MiB),
DEFINE_PROP_BIT("blob", VirtIOGPU, parent_obj.conf.flags,
VIRTIO_GPU_FLAG_BLOB_ENABLED, false),
+ DEFINE_PROP_BIT("relfence", VirtIOGPU, parent_obj.conf.flags,
+ VIRTIO_GPU_FLAG_RELFENCE_ENABLED, false),
DEFINE_PROP_END_OF_LIST(),
};
@@ -90,6 +90,7 @@ enum virtio_gpu_base_conf_flags {
VIRTIO_GPU_FLAG_EDID_ENABLED,
VIRTIO_GPU_FLAG_DMABUF_ENABLED,
VIRTIO_GPU_FLAG_BLOB_ENABLED,
+ VIRTIO_GPU_FLAG_RELFENCE_ENABLED,
};
#define virtio_gpu_virgl_enabled(_cfg) \
@@ -102,6 +103,8 @@ enum virtio_gpu_base_conf_flags {
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_DMABUF_ENABLED))
#define virtio_gpu_blob_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_BLOB_ENABLED))
+#define virtio_gpu_relfence_enabled(_cfg) \
+ (_cfg.flags & (1 << VIRTIO_GPU_FLAG_RELFENCE_ENABLED))
struct virtio_gpu_base_conf {
uint32_t max_outputs;
@@ -170,6 +173,7 @@ struct VirtIOGPU {
QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;
+ QTAILQ_HEAD(, virtio_gpu_ctrl_command) rel_fenceq;
uint64_t hostmem;
@@ -274,6 +278,8 @@ int virtio_gpu_update_dmabuf(VirtIOGPU *g,
uint32_t scanout_id,
struct virtio_gpu_simple_resource *res,
struct virtio_gpu_framebuffer *fb);
+void virtio_gpu_free_dmabuf(VirtIOGPU *g,
+ VGPUDMABuf *dmabuf);
/* virtio-gpu-3d.c */
void virtio_gpu_virgl_process_cmd(VirtIOGPU *g,
@@ -60,6 +60,8 @@
*/
#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+#define VIRTIO_GPU_F_RELEASE_FENCE 4
+
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
To support this feature, we add a new queue named rel_fenceq to enqueue
finished set_scanout_blob cmds.

Cc: Gerd Hoffmann <kraxel@redhat.com>
Signed-off-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
---
 hw/display/virtio-gpu-base.c                |  3 ++
 hw/display/virtio-gpu-udmabuf.c             |  4 +-
 hw/display/virtio-gpu.c                     | 54 ++++++++++++++++++++-
 include/hw/virtio/virtio-gpu.h              |  6 +++
 include/standard-headers/linux/virtio_gpu.h |  2 +
 5 files changed, 66 insertions(+), 3 deletions(-)