Message ID: 20230108210445.3948344-6-dmitry.osipenko@collabora.com (mailing list archive)
State:      New, archived
Series:     Add generic memory shrinker to VirtIO-GPU and Panfrost DRM drivers
On Mon, Jan 09, 2023 at 12:04:39AM +0300, Dmitry Osipenko wrote:
> f a multi-GPU system by using drm_WARN_*() and
> drm_dbg_kms() helpers that print out DRM device name corresponding
> to shmem GEM.

That commit message looks truncated ...

take care,
  Gerd
On 08.01.23 22:04, Dmitry Osipenko wrote:
> Ease debugging of a multi-GPU system by using drm_WARN_*() and
> drm_dbg_kms() helpers that print out DRM device name corresponding
> to shmem GEM.
>
> Suggested-by: Thomas Zimmermann <tzimmermann@suse.de>
> Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>

Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>

> ---
>  drivers/gpu/drm/drm_gem_shmem_helper.c | 38 +++++++++++++++-----------
>  1 file changed, 22 insertions(+), 16 deletions(-)
[...]
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index f21f47737817..5006f7da7f2d 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->vmap_use_count);
+	drm_WARN_ON(obj->dev, shmem->vmap_use_count);
 
 	if (obj->import_attach) {
 		drm_prime_gem_destroy(obj, shmem->sgt);
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
 			drm_gem_shmem_put_pages(shmem);
 	}
 
-	WARN_ON(shmem->pages_use_count);
+	drm_WARN_ON(obj->dev, shmem->pages_use_count);
 
 	drm_gem_object_release(obj);
 	mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
 
 	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
-		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+		drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+			    PTR_ERR(pages));
 		shmem->pages_use_count = 0;
 		return PTR_ERR(pages);
 	}
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
  */
 int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
 {
+	struct drm_gem_object *obj = &shmem->base;
 	int ret;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	ret = mutex_lock_interruptible(&shmem->pages_lock);
 	if (ret)
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->pages_use_count))
+	if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
 		return;
 
 	if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
  */
 int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	return drm_gem_shmem_get_pages(shmem);
 }
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
  */
 void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
 {
-	WARN_ON(shmem->base.import_attach);
+	struct drm_gem_object *obj = &shmem->base;
+
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	drm_gem_shmem_put_pages(shmem);
 }
@@ -303,7 +309,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	if (obj->import_attach) {
 		ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
 		if (!ret) {
-			if (WARN_ON(map->is_iomem)) {
+			if (drm_WARN_ON(obj->dev, map->is_iomem)) {
 				dma_buf_vunmap(obj->import_attach->dmabuf, map);
 				ret = -EIO;
 				goto err_put_pages;
@@ -328,7 +334,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
 	}
 
 	if (ret) {
-		DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+		drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
 		goto err_put_pages;
 	}
 
@@ -378,7 +384,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	if (WARN_ON_ONCE(!shmem->vmap_use_count))
+	if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
 		return;
 
 	if (--shmem->vmap_use_count > 0)
@@ -463,7 +469,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
 	struct drm_gem_object *obj = &shmem->base;
 	struct drm_device *dev = obj->dev;
 
-	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+	drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
 
 	dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
 	sg_free_table(shmem->sgt);
@@ -555,7 +561,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
 	mutex_lock(&shmem->pages_lock);
 
 	if (page_offset >= num_pages ||
-	    WARN_ON_ONCE(!shmem->pages) ||
+	    drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
 	    shmem->madv < 0) {
 		ret = VM_FAULT_SIGBUS;
 	} else {
@@ -574,7 +580,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	mutex_lock(&shmem->pages_lock);
 
@@ -583,7 +589,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
 	 * mmap'd, vm_open() just grabs an additional reference for the new
 	 * mm the vma is getting copied into (ie. on fork()).
 	 */
-	if (!WARN_ON_ONCE(!shmem->pages_use_count))
+	if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
 		shmem->pages_use_count++;
 
 	mutex_unlock(&shmem->pages_lock);
@@ -677,7 +683,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
 {
 	struct drm_gem_object *obj = &shmem->base;
 
-	WARN_ON(shmem->base.import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
 }
@@ -708,7 +714,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
 	if (shmem->sgt)
 		return shmem->sgt;
 
-	WARN_ON(obj->import_attach);
+	drm_WARN_ON(obj->dev, obj->import_attach);
 
 	ret = drm_gem_shmem_get_pages(shmem);
 	if (ret)
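[Editorial note] For readers skimming the conversion: the drm_WARN_*() helpers take a struct drm_device pointer and fold the device identity into the WARN() report. The sketch below is a rough paraphrase of the idea only; the real macros live in include/drm/drm_print.h and their exact bodies vary between kernel versions, and sketch_drm_WARN is a made-up name used to avoid clashing with the real one.

#include <linux/device.h>
#include <drm/drm_device.h>

/*
 * Sketch only: approximates how the drm_WARN() family builds its
 * device prefix before delegating to the plain WARN() machinery.
 */
#define sketch_drm_WARN(drm, condition, format, args...)	\
	WARN(condition, "%s %s: " format,			\
	     dev_driver_string((drm)->dev),			\
	     dev_name((drm)->dev), ## args)

This is why every converted call site needs obj->dev threaded through: the macro cannot print a device name it was never given.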
Ease debugging of a multi-GPU system by using drm_WARN_*() and
drm_dbg_kms() helpers that print out DRM device name corresponding
to shmem GEM.

Suggested-by: Thomas Zimmermann <tzimmermann@suse.de>
Signed-off-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
---
 drivers/gpu/drm/drm_gem_shmem_helper.c | 38 +++++++++++++++-----------
 1 file changed, 22 insertions(+), 16 deletions(-)
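[Editorial note] To illustrate the practical difference the patch makes, here is a minimal sketch; it is not part of the patch, and check_shmem() is a hypothetical helper invented for illustration.

#include <drm/drm_print.h>
#include <drm/drm_gem_shmem_helper.h>

/* Hypothetical helper contrasting the old and new diagnostics. */
static void check_shmem(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	/* Old style: anonymous warning, no device identity in the log. */
	WARN_ON(shmem->pages_use_count);

	/*
	 * New style: the same check, but the splat is prefixed with the
	 * driver and device name (e.g. "panfrost ffa00000.gpu:"), so the
	 * offending GPU is identifiable on a multi-GPU system.
	 */
	drm_WARN_ON(obj->dev, shmem->pages_use_count);

	/*
	 * drm_dbg_kms() is likewise device-prefixed; it is emitted only
	 * when the DRM_UT_KMS bit is set in the drm.debug module
	 * parameter.
	 */
	drm_dbg_kms(obj->dev, "pages_use_count=%u\n",
		    shmem->pages_use_count);
}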