@@ -46,9 +46,9 @@ static int virtio_gpu_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, virtio_gpu_modeset, int, 0400);
-static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
+static int virtio_gpu_pci_quirk(struct drm_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(vdev->dev.parent);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
char unique[20];
@@ -101,6 +101,7 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vdev)
static int virtio_gpu_probe(struct virtio_device *vdev)
{
struct drm_device *dev;
+ struct device *dma_dev;
int ret;
if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1)
@@ -109,18 +110,29 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
if (virtio_gpu_modeset == 0)
return -EINVAL;
- dev = drm_dev_alloc(&driver, &vdev->dev);
+ /*
+ * If the GPU's parent is a PCI device, then we will use this PCI device
+ * as the DRM driver's device, because the GPU won't have the PCI's IOMMU
+ * DMA ops in this case: the GPU device sits on a separate (from PCI)
+ * virtio-bus.
+ */
+ if (!strcmp(vdev->dev.parent->bus->name, "pci"))
+ dma_dev = vdev->dev.parent;
+ else
+ dma_dev = &vdev->dev;
+
+ dev = drm_dev_alloc(&driver, dma_dev);
if (IS_ERR(dev))
return PTR_ERR(dev);
vdev->priv = dev;
if (!strcmp(vdev->dev.parent->bus->name, "pci")) {
- ret = virtio_gpu_pci_quirk(dev, vdev);
+ ret = virtio_gpu_pci_quirk(dev);
if (ret)
goto err_free;
}
- ret = virtio_gpu_init(dev);
+ ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
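For illustration, the bus-name comparison used in the hunk above can also
be expressed through the bus type with dev_is_pci() from <linux/pci.h>; a
minimal sketch, with a hypothetical helper name:

#include <linux/pci.h>
#include <linux/virtio.h>

/* Hypothetical helper, equivalent to the open-coded strcmp() above: for
 * PCI transport the virtio device's parent is the PCI device, which is
 * the one that carries the IOMMU/DMA ops and should back the DRM device. */
static struct device *virtio_gpu_dma_dev(struct virtio_device *vdev)
{
	if (dev_is_pci(vdev->dev.parent))
		return vdev->dev.parent;

	return &vdev->dev;
}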
@@ -100,8 +100,6 @@ struct virtio_gpu_object {
struct virtio_gpu_object_shmem {
struct virtio_gpu_object base;
- struct sg_table *pages;
- uint32_t mapped;
};
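With the two fields gone, all scatter-gather state of a shmem BO lives in
the drm_gem_shmem_object base; a small sketch of how it is reached (the
accessor name is hypothetical, the ->sgt member is the shmem helper's
cached table):

/* bo->base is a struct drm_gem_shmem_object; its ->sgt member is filled
 * and dma-mapped by drm_gem_shmem_get_pages_sgt(). */
static struct sg_table *virtio_gpu_shmem_sgt(struct virtio_gpu_object *bo)
{
	return bo->base.sgt;
}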
struct virtio_gpu_object_vram {
@@ -214,7 +212,6 @@ struct virtio_gpu_drv_cap_cache {
};
struct virtio_gpu_device {
- struct device *dev;
struct drm_device *ddev;
struct virtio_device *vdev;
@@ -282,7 +279,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
/* virtgpu_kms.c */
-int virtio_gpu_init(struct drm_device *dev);
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -110,7 +110,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
vgdev->num_capsets = num_capsets;
}
-int virtio_gpu_init(struct drm_device *dev)
+int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
{
static vq_callback_t *callbacks[] = {
virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack
@@ -123,7 +123,7 @@ int virtio_gpu_init(struct drm_device *dev)
u32 num_scanouts, num_capsets;
int ret = 0;
- if (!virtio_has_feature(dev_to_virtio(dev->dev), VIRTIO_F_VERSION_1))
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
return -ENODEV;
vgdev = kzalloc(sizeof(struct virtio_gpu_device), GFP_KERNEL);
@@ -132,8 +132,7 @@ int virtio_gpu_init(struct drm_device *dev)
vgdev->ddev = dev;
dev->dev_private = vgdev;
- vgdev->vdev = dev_to_virtio(dev->dev);
- vgdev->dev = dev->dev;
+ vgdev->vdev = vdev;
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
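The explicit vdev parameter matters here: after the probe() change,
dev->dev may point at the PCI device, so the old dev_to_virtio(dev->dev)
would resolve garbage via container_of(). A hedged sketch of how both
devices are reachable now (hypothetical helper name):

/* dev_to_virtio() is only valid on &vdev->dev itself; with the DRM device
 * re-parented, keep the virtio handle in vgdev->vdev and derive the
 * DMA-capable device from the DRM device instead. */
static inline struct device *virtio_gpu_dma_dev_of(struct virtio_gpu_device *vgdev)
{
	return vgdev->ddev->dev;	/* PCI parent or &vdev->dev */
}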
@@ -67,21 +67,6 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
if (virtio_gpu_is_shmem(bo)) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
-
- if (shmem->pages) {
- if (shmem->mapped) {
- dma_unmap_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- shmem->mapped = 0;
- }
-
- sg_free_table(shmem->pages);
- kfree(shmem->pages);
- shmem->pages = NULL;
- drm_gem_shmem_unpin(&bo->base);
- }
-
drm_gem_shmem_free(&bo->base);
} else if (virtio_gpu_is_vram(bo)) {
struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
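The open-coded teardown can be dropped because the shmem helper now owns
the table. Roughly what drm_gem_shmem_free() does with it (paraphrased
from drm_gem_shmem_helper.c, where obj->dev->dev is the DRM device, i.e.
the PCI parent after this change):

	if (shmem->sgt) {
		dma_unmap_sgtable(obj->dev->dev, shmem->sgt,
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(shmem->sgt);
		kfree(shmem->sgt);
	}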
@@ -153,37 +138,18 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
unsigned int *nents)
{
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
- int si, ret;
+ struct sg_table *pages;
+ int si;
- ret = drm_gem_shmem_pin(&bo->base);
- if (ret < 0)
- return -EINVAL;
-
- /*
- * virtio_gpu uses drm_gem_shmem_get_sg_table instead of
- * drm_gem_shmem_get_pages_sgt because virtio has it's own set of
- * dma-ops. This is discouraged for other drivers, but should be fine
- * since virtio_gpu doesn't support dma-buf import from other devices.
- */
- shmem->pages = drm_gem_shmem_get_sg_table(&bo->base);
- ret = PTR_ERR(shmem->pages);
- if (ret) {
- drm_gem_shmem_unpin(&bo->base);
- shmem->pages = NULL;
- return ret;
- }
+ pages = drm_gem_shmem_get_pages_sgt(&bo->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- if (use_dma_api) {
- ret = dma_map_sgtable(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE, 0);
- if (ret)
- return ret;
- *nents = shmem->mapped = shmem->pages->nents;
- } else {
- *nents = shmem->pages->orig_nents;
- }
+ if (use_dma_api)
+ *nents = pages->nents;
+ else
+ *nents = pages->orig_nents;
*ents = kvmalloc_array(*nents,
sizeof(struct virtio_gpu_mem_entry),
@@ -194,13 +160,13 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
}
if (use_dma_api) {
- for_each_sgtable_dma_sg(shmem->pages, sg, si) {
+ for_each_sgtable_dma_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_dma_address(sg));
(*ents)[si].length = cpu_to_le32(sg_dma_len(sg));
(*ents)[si].padding = 0;
}
} else {
- for_each_sgtable_sg(shmem->pages, sg, si) {
+ for_each_sgtable_sg(pages, sg, si) {
(*ents)[si].addr = cpu_to_le64(sg_phys(sg));
(*ents)[si].length = cpu_to_le32(sg->length);
(*ents)[si].padding = 0;
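A brief sketch of why the entry count differs between the two paths:
drm_gem_shmem_get_pages_sgt() pins the pages, builds the table and
dma-maps it against the DRM device, caching the result in bo->base.sgt.

	/* After dma_map_sgtable(), sgt->nents counts the DMA-mapped
	 * segments (an IOMMU may merge contiguous chunks), while
	 * sgt->orig_nents counts the CPU-side segments used when the
	 * device bypasses the DMA API (VIRTIO_F_ACCESS_PLATFORM absent). */
	*nents = use_dma_api ? pages->nents : pages->orig_nents;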
@@ -593,11 +593,10 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (virtio_gpu_is_shmem(bo) && use_dma_api)
- dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
+ dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1017,11 +1016,9 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- if (virtio_gpu_is_shmem(bo) && use_dma_api) {
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
- shmem->pages, DMA_TO_DEVICE);
- }
+ if (virtio_gpu_is_shmem(bo) && use_dma_api)
+ dma_sync_sgtable_for_device(&vgdev->vdev->dev,
+ bo->base.sgt, DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
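One hedged aside on the two sync hunks above: dma_sync_sgtable_for_device()
should be handed the same struct device that mapped the table. Since
drm_gem_shmem_get_pages_sgt() maps against the DRM device, a
self-consistent variant of the sync would look like:

	if (virtio_gpu_is_shmem(bo) && use_dma_api)
		dma_sync_sgtable_for_device(vgdev->ddev->dev,
					    bo->base.sgt, DMA_TO_DEVICE);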
DRM API requires the DRM's driver to be backed with a device that can
be used for generic DMA operations. The VirtIO-GPU device can't perform
DMA operations if it uses PCI transport, because the PCI device driver
creates a virtual VirtIO-GPU device that isn't associated with the PCI
device. Use the PCI's GPU device for the DRM's device instead of the
VirtIO-GPU device and drop the DMA-related hacks from the VirtIO-GPU
driver.

Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/virtio/virtgpu_drv.c    | 22 +++++++---
 drivers/gpu/drm/virtio/virtgpu_drv.h    |  5 +--
 drivers/gpu/drm/virtio/virtgpu_kms.c    |  7 ++--
 drivers/gpu/drm/virtio/virtgpu_object.c | 56 +++++--------------------
 drivers/gpu/drm/virtio/virtgpu_vq.c     | 13 +++---
 5 files changed, 37 insertions(+), 66 deletions(-)