| Message ID   | 20190614134726.3827-4-hch@lst.de (mailing list archive)    |
|--------------|-------------------------------------------------------------|
| State        | Not Applicable                                              |
| Delegated to | Johannes Berg                                               |
| Series       | [01/16] media: videobuf-dma-contig: use dma_mmap_coherent  |
On Fri, Jun 14, 2019 at 03:47:13PM +0200, Christoph Hellwig wrote:
> Remove usage of the legacy drm PCI DMA wrappers, and with that the
> incorrect usage cocktail of __GFP_COMP, virt_to_page on DMA allocation
> and SetPageReserved.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  drivers/gpu/drm/i915/i915_gem.c        | 30 +++++++++++++-------------
>  drivers/gpu/drm/i915/i915_gem_object.h |  3 ++-
>  drivers/gpu/drm/i915/intel_display.c   |  2 +-
>  3 files changed, 18 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index ad01c92aaf74..8f2053c91aff 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -228,7 +228,6 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
>  static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>  {
>  	struct address_space *mapping = obj->base.filp->f_mapping;
> -	drm_dma_handle_t *phys;
>  	struct sg_table *st;
>  	struct scatterlist *sg;
>  	char *vaddr;
> @@ -242,13 +241,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>  	 * to handle all possible callers, and given typical object sizes,
>  	 * the alignment of the buddy allocation will naturally match.
>  	 */
> -	phys = drm_pci_alloc(obj->base.dev,
> -			     roundup_pow_of_two(obj->base.size),
> -			     roundup_pow_of_two(obj->base.size));
> -	if (!phys)
> +	obj->phys_vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
> +			roundup_pow_of_two(obj->base.size),
> +			&obj->phys_handle, GFP_KERNEL);
> +	if (!obj->phys_vaddr)
>  		return -ENOMEM;
>
> -	vaddr = phys->vaddr;
> +	vaddr = obj->phys_vaddr;
>  	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
>  		struct page *page;
>  		char *src;
> @@ -286,18 +285,17 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
>  	sg->offset = 0;
>  	sg->length = obj->base.size;
>
> -	sg_dma_address(sg) = phys->busaddr;
> +	sg_dma_address(sg) = obj->phys_handle;
>  	sg_dma_len(sg) = obj->base.size;
>
> -	obj->phys_handle = phys;
> -
>  	__i915_gem_object_set_pages(obj, st, sg->length);
>
>  	return 0;
>
>  err_phys:
> -	drm_pci_free(obj->base.dev, phys);
> -
> +	dma_free_coherent(&obj->base.dev->pdev->dev,
> +			roundup_pow_of_two(obj->base.size), obj->phys_vaddr,
> +			obj->phys_handle);

Need to undo the damage to obj->phys_vaddr here since
i915_gem_pwrite_ioctl() will now use that to determine if it's dealing
with a phys obj.

>  	return err;
>  }
>
> @@ -335,7 +333,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
>
>  	if (obj->mm.dirty) {
>  		struct address_space *mapping = obj->base.filp->f_mapping;
> -		char *vaddr = obj->phys_handle->vaddr;
> +		char *vaddr = obj->phys_vaddr;
>  		int i;
>
>  		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
> @@ -363,7 +361,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
>  	sg_free_table(pages);
>  	kfree(pages);
>
> -	drm_pci_free(obj->base.dev, obj->phys_handle);
> +	dma_free_coherent(&obj->base.dev->pdev->dev,
> +			roundup_pow_of_two(obj->base.size), obj->phys_vaddr,
> +			obj->phys_handle);

This one is fine I think since the object remains a phys obj once
turned into one. At least the current code isn't clearing phys_handle
here. But my memory is a bit hazy on the details. Chris?

Also maybe s/phys_handle/phys_busaddr/ all over?
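[Editor's note: a minimal sketch of what the err_phys comment above is asking for — clearing obj->phys_vaddr again after the failed allocation so that i915_gem_pwrite_ioctl() does not later treat the object as a phys object. This is only an illustration of the requested follow-up, not part of the posted patch.]

err_phys:
	dma_free_coherent(&obj->base.dev->pdev->dev,
			  roundup_pow_of_two(obj->base.size), obj->phys_vaddr,
			  obj->phys_handle);
	/* Undo the assignment made at allocation time: pwrite now keys off
	 * obj->phys_vaddr to detect phys objects, so a stale pointer here
	 * would send writes down the phys path after a failed allocation.
	 */
	obj->phys_vaddr = NULL;
	return err;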
>  }
>
>  static void
> @@ -603,7 +603,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
>  		     struct drm_i915_gem_pwrite *args,
>  		     struct drm_file *file)
>  {
> -	void *vaddr = obj->phys_handle->vaddr + args->offset;
> +	void *vaddr = obj->phys_vaddr + args->offset;
>  	char __user *user_data = u64_to_user_ptr(args->data_ptr);
>
>  	/* We manually control the domain here and pretend that it
> @@ -1431,7 +1431,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
>  		ret = i915_gem_gtt_pwrite_fast(obj, args);
>
>  	if (ret == -EFAULT || ret == -ENOSPC) {
> -		if (obj->phys_handle)
> +		if (obj->phys_vaddr)
>  			ret = i915_gem_phys_pwrite(obj, args, file);
>  		else
>  			ret = i915_gem_shmem_pwrite(obj, args);
> diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
> index ca93a40c0c87..14bd2d61d0f6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/i915_gem_object.h
> @@ -290,7 +290,8 @@ struct drm_i915_gem_object {
>  	};
>
>  	/** for phys allocated objects */
> -	struct drm_dma_handle *phys_handle;
> +	dma_addr_t phys_handle;
> +	void *phys_vaddr;
>
>  	struct reservation_object __builtin_resv;
>  };
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 5098228f1302..4f8b368ac4e2 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -10066,7 +10066,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
>  	u32 base;
>
>  	if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
> -		base = obj->phys_handle->busaddr;
> +		base = obj->phys_handle;
>  	else
>  		base = intel_plane_ggtt_offset(plane_state);
>
> -- 
> 2.20.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
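[Editor's note: on the naming question raised in the review, a purely hypothetical sketch of the i915_gem_object.h fields after the suggested s/phys_handle/phys_busaddr/ rename; the posted patch keeps the name phys_handle.]

	/** for phys allocated objects */
	dma_addr_t phys_busaddr;	/* bus address filled in by dma_alloc_coherent() */
	void *phys_vaddr;		/* CPU virtual address of the same allocation */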