Message ID | 20230130101230.25347-2-matthew.auld@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | [1/6] drm/i915/ttm: fix sparse warning |
On 30.01.2023 11:12, Matthew Auld wrote:
> In the near future TTM will have NULL bo->resource when the object is
> initially created, plus after calling into pipeline-gutting. Try to
> handle the remaining cases. In practice NULL bo->resource should be
> taken to mean swapped-out or purged object.
>
> References: 516198d317d8 ("drm/i915: audit bo->resource usage v3")
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Christian König <ckoenig.leichtzumerken@gmail.com>
> Cc: Nirmoy Das <nirmoy.das@intel.com>
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_ttm.c      | 12 +++++++++---
>  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c |  7 ++++++-
>  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c   |  7 +++++--
>  3 files changed, 20 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index 4758f21c91e1..4ba1d7862ff9 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
>          struct ttm_placement place = {};
>          int ret;
>
> -        if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
> +        if (!bo->ttm || (bo->resource && bo->resource->mem_type != TTM_PL_SYSTEM))
>                  return 0;
>
>          GEM_BUG_ON(!i915_tt->is_shmem);
> @@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
>  {
>          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
>
> -        if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
> +        /*
> +         * This gets called twice by ttm, so long as we have a ttm resource or
> +         * ttm_tt then we can still safely call this. Due to pipeline-gutting,
> +         * we maybe have NULL bo->resource, but in that case we should always
> +         * have a ttm alive (like if the pages are swapped out).
> +         */
> +        if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
>                  __i915_gem_object_pages_fini(obj);
>                  i915_ttm_free_cached_io_rsgt(obj);
>          }
> @@ -1198,7 +1204,7 @@ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
>
>          assert_object_held_shared(obj);
>
> -        if (i915_ttm_cpu_maps_iomem(bo->resource)) {
> +        if (bo->resource && i915_ttm_cpu_maps_iomem(bo->resource)) {

I wonder if i915_ttm_cpu_maps_iomem couldn't handle null resource?

>                  wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
>
>                  /* userfault_count is protected by obj lock and rpm wakeref. */
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> index 76dd9e5e1a8b..72953ebadfd8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> @@ -83,7 +83,8 @@ void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
>  {
>          struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
>
> -        if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
> +        if ((bo->resource && i915_ttm_cpu_maps_iomem(bo->resource)) ||
> +            bo->ttm->caching != ttm_cached) {
>                  obj->write_domain = I915_GEM_DOMAIN_WC;
>                  obj->read_domains = I915_GEM_DOMAIN_WC;
>          } else {
> @@ -711,6 +712,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
>
>          assert_object_held(dst);
>          assert_object_held(src);
> +
> +        if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
> +                return -EINVAL;
> +
>          i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
>
>          ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
> index 7e67742bc65e..be44e7eed892 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
> @@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
>          unsigned int flags;
>          int err = 0;
>
> -        if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
> +        if (!bo->resource || bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)

!i915_ttm_cpu_maps_iomem ?

>                  return 0;
>
>          if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
> @@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
>                  return err;
>
>          /* Content may have been swapped. */
> -        err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
> +        if (!backup_bo->resource)
> +                err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
> +        if (!err)
> +                err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);

Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>

Regards
Andrzej

>          if (!err) {
>                  err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
>                                              false);
On Mon, 30 Jan 2023 at 11:00, Andrzej Hajda <andrzej.hajda@intel.com> wrote:
>
> On 30.01.2023 11:12, Matthew Auld wrote:
> > In the near future TTM will have NULL bo->resource when the object is
> > initially created, plus after calling into pipeline-gutting. Try to
> > handle the remaining cases. In practice NULL bo->resource should be
> > taken to mean swapped-out or purged object.
> >
> > References: 516198d317d8 ("drm/i915: audit bo->resource usage v3")
> > Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> > Cc: Christian König <ckoenig.leichtzumerken@gmail.com>
> > Cc: Nirmoy Das <nirmoy.das@intel.com>
> > ---
> >  drivers/gpu/drm/i915/gem/i915_gem_ttm.c      | 12 +++++++++---
> >  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c |  7 ++++++-
> >  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c   |  7 +++++--
> >  3 files changed, 20 insertions(+), 6 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > index 4758f21c91e1..4ba1d7862ff9 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> > @@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
> >          struct ttm_placement place = {};
> >          int ret;
> >
> > -        if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
> > +        if (!bo->ttm || (bo->resource && bo->resource->mem_type != TTM_PL_SYSTEM))
> >                  return 0;
> >
> >          GEM_BUG_ON(!i915_tt->is_shmem);
> > @@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
> >  {
> >          struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
> >
> > -        if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
> > +        /*
> > +         * This gets called twice by ttm, so long as we have a ttm resource or
> > +         * ttm_tt then we can still safely call this. Due to pipeline-gutting,
> > +         * we maybe have NULL bo->resource, but in that case we should always
> > +         * have a ttm alive (like if the pages are swapped out).
> > +         */
> > +        if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
> >                  __i915_gem_object_pages_fini(obj);
> >                  i915_ttm_free_cached_io_rsgt(obj);
> >          }
> > @@ -1198,7 +1204,7 @@ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
> >
> >          assert_object_held_shared(obj);
> >
> > -        if (i915_ttm_cpu_maps_iomem(bo->resource)) {
> > +        if (bo->resource && i915_ttm_cpu_maps_iomem(bo->resource)) {
>
> I wonder if i915_ttm_cpu_maps_iomem couldn't handle null resource?

Yeah, seems reasonable to me.

> >
> >                  wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
> >
> >                  /* userfault_count is protected by obj lock and rpm wakeref. */
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> > index 76dd9e5e1a8b..72953ebadfd8 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
> > @@ -83,7 +83,8 @@ void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
> >  {
> >          struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
> >
> > -        if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
> > +        if ((bo->resource && i915_ttm_cpu_maps_iomem(bo->resource)) ||
> > +            bo->ttm->caching != ttm_cached) {
> >                  obj->write_domain = I915_GEM_DOMAIN_WC;
> >                  obj->read_domains = I915_GEM_DOMAIN_WC;
> >          } else {
> > @@ -711,6 +712,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
> >
> >          assert_object_held(dst);
> >          assert_object_held(src);
> > +
> > +        if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
> > +                return -EINVAL;
> > +
> >          i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
> >
> >          ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
> > index 7e67742bc65e..be44e7eed892 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
> > @@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
> >          unsigned int flags;
> >          int err = 0;
> >
> > -        if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
> > +        if (!bo->resource || bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
>
> !i915_ttm_cpu_maps_iomem ?
>
> >                  return 0;
> >
> >          if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
> > @@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
> >                  return err;
> >
> >          /* Content may have been swapped. */
> > -        err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
> > +        if (!backup_bo->resource)
> > +                err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
> > +        if (!err)
> > +                err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
>
> Reviewed-by: Andrzej Hajda <andrzej.hajda@intel.com>
>
> Regards
> Andrzej
>
> >          if (!err) {
> >                  err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
> >                                              false);
>
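
As a point of reference for the discussion above, a minimal sketch of what a NULL-tolerant i915_ttm_cpu_maps_iomem() could look like, assuming the current helper is a plain mem_type comparison (the exact definition lives in i915_gem_ttm.h and may differ from this):

/*
 * Sketch only, not part of this patch: let the helper absorb the NULL
 * check so callers no longer need their own "bo->resource &&" guards.
 */
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
        /* A NULL resource means swapped-out or purged, which is never iomem. */
        return mem && mem->mem_type != I915_PL_SYSTEM;
}

With a helper like this, call sites such as i915_ttm_unmap_virtual() and i915_ttm_adjust_domains_after_move() could keep their original single-condition checks.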
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 4758f21c91e1..4ba1d7862ff9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
         struct ttm_placement place = {};
         int ret;
 
-        if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+        if (!bo->ttm || (bo->resource && bo->resource->mem_type != TTM_PL_SYSTEM))
                 return 0;
 
         GEM_BUG_ON(!i915_tt->is_shmem);
@@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 {
         struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
-        if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+        /*
+         * This gets called twice by ttm, so long as we have a ttm resource or
+         * ttm_tt then we can still safely call this. Due to pipeline-gutting,
+         * we maybe have NULL bo->resource, but in that case we should always
+         * have a ttm alive (like if the pages are swapped out).
+         */
+        if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
                 __i915_gem_object_pages_fini(obj);
                 i915_ttm_free_cached_io_rsgt(obj);
         }
@@ -1198,7 +1204,7 @@ static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
 
         assert_object_held_shared(obj);
 
-        if (i915_ttm_cpu_maps_iomem(bo->resource)) {
+        if (bo->resource && i915_ttm_cpu_maps_iomem(bo->resource)) {
                 wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
 
                 /* userfault_count is protected by obj lock and rpm wakeref. */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 76dd9e5e1a8b..72953ebadfd8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -83,7 +83,8 @@ void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
 {
         struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
 
-        if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
+        if ((bo->resource && i915_ttm_cpu_maps_iomem(bo->resource)) ||
+            bo->ttm->caching != ttm_cached) {
                 obj->write_domain = I915_GEM_DOMAIN_WC;
                 obj->read_domains = I915_GEM_DOMAIN_WC;
         } else {
@@ -711,6 +712,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
 
         assert_object_held(dst);
         assert_object_held(src);
+
+        if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+                return -EINVAL;
+
         i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 
         ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 7e67742bc65e..be44e7eed892 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
         unsigned int flags;
         int err = 0;
 
-        if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+        if (!bo->resource || bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
                 return 0;
 
         if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
                 return err;
 
         /* Content may have been swapped. */
-        err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+        if (!backup_bo->resource)
+                err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+        if (!err)
+                err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
         if (!err) {
                 err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
                                             false);
In the near future TTM will have NULL bo->resource when the object is
initially created, plus after calling into pipeline-gutting. Try to
handle the remaining cases. In practice NULL bo->resource should be
taken to mean swapped-out or purged object.

References: 516198d317d8 ("drm/i915: audit bo->resource usage v3")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Christian König <ckoenig.leichtzumerken@gmail.com>
Cc: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c      | 12 +++++++++---
 drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c |  7 ++++++-
 drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c   |  7 +++++--
 3 files changed, 20 insertions(+), 6 deletions(-)
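
The invariant the commit message relies on can be summarised with a hypothetical helper (illustration only; the patch open-codes the checks at each call site rather than adding anything like this):

/*
 * Hypothetical helper, for illustration only. Per the commit message and
 * the comment added in i915_ttm_delete_mem_notify(): a NULL bo->resource
 * means the object is swapped out or purged, and in that state a ttm_tt
 * is still expected to back the object (e.g. after pipeline-gutting).
 */
static inline bool i915_ttm_is_swapped_or_purged(const struct ttm_buffer_object *bo)
{
        return !bo->resource;
}

Each touched call site then either bails out early (shrink, backup, copy) or falls back to the system/ttm_tt path (delete_mem_notify, restore) when this condition holds.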