@@ -177,8 +177,6 @@ struct virtio_gpu_device {
struct kmem_cache *vbufs;
bool vqs_ready;
- bool disable_notify;
-
struct ida resource_ida;
wait_queue_head_t resp_wq;
@@ -256,6 +254,9 @@ void virtio_gpu_array_put_free_work(struct work_struct *work);
/* virtio vg */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
+
+void virtio_gpu_commit_ctrl(struct virtio_gpu_device *vgdev);
+
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
struct virtio_gpu_object_params *params,
@@ -263,20 +264,48 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
-void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+
+void virtio_gpu_add_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
-void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+
+static inline void
+virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+ uint64_t offset,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y,
+ struct virtio_gpu_object_array *objs,
+ struct virtio_gpu_fence *fence)
+{
+ virtio_gpu_add_transfer_to_host_2d(vgdev, offset, width, height, x, y,
+ objs, fence);
+ virtio_gpu_commit_ctrl(vgdev);
+}
+
+void virtio_gpu_add_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
uint32_t width, uint32_t height);
-void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+
+void virtio_gpu_add_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y);
+
+static inline void
+virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id, uint32_t resource_id,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ virtio_gpu_add_set_scanout(vgdev, scanout_id, resource_id,
+ width, height, x, y);
+ virtio_gpu_commit_ctrl(vgdev);
+}
+
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
struct virtio_gpu_fence *fence);
@@ -332,9 +361,6 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
-
/* virtio_gpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -122,7 +122,7 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
return;
virtio_gpu_array_add_obj(objs, &bo->base.base);
- virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
+ virtio_gpu_add_transfer_to_host_2d(vgdev, off, w, h, x, y,
objs, NULL);
}
@@ -154,8 +154,6 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
return;
- virtio_gpu_disable_notify(vgdev);
-
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
if (bo->dumb)
virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -173,7 +171,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_h >> 16,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
- virtio_gpu_cmd_set_scanout(vgdev, output->index,
+ virtio_gpu_add_set_scanout(vgdev, output->index,
bo->hw_res_handle,
plane->state->src_w >> 16,
plane->state->src_h >> 16,
@@ -181,13 +179,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_y >> 16);
}
- virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
+ virtio_gpu_add_resource_flush(vgdev, bo->hw_res_handle,
rect.x1,
rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1);
- virtio_gpu_enable_notify(vgdev);
+ virtio_gpu_commit_ctrl(vgdev);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -423,14 +423,11 @@ static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
}
}
-static void virtio_gpu_commit_ctrl(struct virtio_gpu_device *vgdev)
+void virtio_gpu_commit_ctrl(struct virtio_gpu_device *vgdev)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
bool notify;
- if (vgdev->disable_notify)
- return;
-
spin_lock(&vgdev->ctrlq.qlock);
notify = virtqueue_kick_prepare(vq);
spin_unlock(&vgdev->ctrlq.qlock);
@@ -443,17 +440,6 @@ static void virtio_gpu_commit_ctrl(struct virtio_gpu_device *vgdev)
virtqueue_notify(vq);
}
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
-{
- vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
- vgdev->disable_notify = false;
- virtio_gpu_commit_ctrl(vgdev);
-}
-
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -559,7 +545,7 @@ static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgde
virtio_gpu_commit_ctrl(vgdev);
}
-void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
+void virtio_gpu_add_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y)
@@ -579,10 +565,9 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
- virtio_gpu_commit_ctrl(vgdev);
}
-void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
+void virtio_gpu_add_resource_flush(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
uint32_t x, uint32_t y,
uint32_t width, uint32_t height)
@@ -601,10 +586,9 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
- virtio_gpu_commit_ctrl(vgdev);
}
-void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
+void virtio_gpu_add_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y,
@@ -634,7 +618,6 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->r.y = cpu_to_le32(y);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
- virtio_gpu_commit_ctrl(vgdev);
}
static void
The global disable_notify state does not scale well when we start using
it in more places and when there are multiple threads.  Allow individual
commands to be added to the virtqueue but not committed.

The naming conventions are:

 - virtio_gpu_cmd_foo: add foo and commit
 - virtio_gpu_add_bar: add bar but do not commit

virtio_gpu_{disable,enable}_notify is replaced by
virtio_gpu_commit_ctrl.

Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.h   | 42 +++++++++++++++++++++-----
 drivers/gpu/drm/virtio/virtgpu_plane.c | 10 +++---
 drivers/gpu/drm/virtio/virtgpu_vq.c    | 25 +++-------------
 3 files changed, 42 insertions(+), 35 deletions(-)