Message ID | 20220106174910.280616-3-matthew.auld@intel.com (mailing list archive) |
---|---|
State | New, archived |
Series | [v3,1/4] drm/i915: don't call free_mmap_offset when purging |
On 1/6/22 18:49, Matthew Auld wrote:
> Ensure we call ttm_bo_unmap_virtual when releasing the pages.
> Importantly this should now handle the ttm swapping case, and all other
> places that already call into i915_ttm_move_notify().
>
> v2: fix up the selftest
>
> Fixes: cf3e3e86d779 ("drm/i915: Use ttm mmap handling for ttm bo's.")
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Hm. I guess we've been saved here previously by the fact that TTM calls
ttm_bo_unmap_virtual() before calling the move callback.

Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/gem/i915_gem_mman.c       |  3 +++
>  .../gpu/drm/i915/gem/i915_gem_object_types.h   |  1 +
>  drivers/gpu/drm/i915/gem/i915_gem_ttm.c        |  6 ++++++
>  .../gpu/drm/i915/gem/selftests/i915_gem_mman.c | 18 ++++--------------
>  4 files changed, 14 insertions(+), 14 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index ee5ec0fd4807..5ac2506f4ee8 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
>  {
>  	struct i915_mmap_offset *mmo, *mn;
>
> +	if (obj->ops->unmap_virtual)
> +		obj->ops->unmap_virtual(obj);
> +
>  	spin_lock(&obj->mmo.lock);
>  	rbtree_postorder_for_each_entry_safe(mmo, mn,
>  					     &obj->mmo.offsets, offset) {
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> index f9f7e44099fe..4b4829eb16c2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
> @@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
>  	int (*pwrite)(struct drm_i915_gem_object *obj,
>  		      const struct drm_i915_gem_pwrite *arg);
>  	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
> +	void (*unmap_virtual)(struct drm_i915_gem_object *obj);
>
>  	int (*dmabuf_export)(struct drm_i915_gem_object *obj);
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> index 8d61d4538a64..1530d9f0bc81 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
> @@ -950,6 +950,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
>  	return drm_vma_node_offset_addr(&obj->base.vma_node);
>  }
>
> +static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
> +{
> +	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
> +}
> +
>  static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
>  	.name = "i915_gem_object_ttm",
>  	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
> @@ -965,6 +970,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
>  	.migrate = i915_ttm_migrate,
>
>  	.mmap_offset = i915_ttm_mmap_offset,
> +	.unmap_virtual = i915_ttm_unmap_virtual,
>  	.mmap_ops = &vm_ops_ttm,
>  };
>
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> index 743a098facf2..f61356b72b1c 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
> @@ -1369,20 +1369,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
>  		}
>  	}
>
> -	if (!obj->ops->mmap_ops) {
> -		err = check_absent(addr, obj->base.size);
> -		if (err) {
> -			pr_err("%s: was not absent\n", obj->mm.region->name);
> -			goto out_unmap;
> -		}
> -	} else {
> -		/* ttm allows access to evicted regions by design */
> -
> -		err = check_present(addr, obj->base.size);
> -		if (err) {
> -			pr_err("%s: was not present\n", obj->mm.region->name);
> -			goto out_unmap;
> -		}
> +	err = check_absent(addr, obj->base.size);
> +	if (err) {
> +		pr_err("%s: was not absent\n", obj->mm.region->name);
> +		goto out_unmap;
>  	}
>
>  out_unmap:
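For readers outside the i915/TTM code, the ordering Thomas is pointing at can be modelled in a few lines of plain C. The sketch below is a hypothetical userspace model, not kernel code: struct bo, struct bo_ops and the helper names are invented stand-ins for the TTM buffer object, the gem object ops table, ttm_bo_unmap_virtual() and i915_gem_object_release_mmap_offset(). It shows why the move path masked the missing unmap (mappings are always zapped before the driver's move callback runs) and what the patch adds to the page-release path (a guarded call through the new optional unmap_virtual op).

#include <stdio.h>

/* Hypothetical stand-ins for the TTM buffer object and the gem object
 * ops table; a userspace model only, not the real kernel structures. */
struct bo;

struct bo_ops {
	void (*move_notify)(struct bo *bo);   /* driver move callback */
	void (*unmap_virtual)(struct bo *bo); /* optional op added by the patch */
};

struct bo {
	int mapped; /* nonzero while userspace PTEs exist */
	const struct bo_ops *ops;
};

/* Models ttm_bo_unmap_virtual(): zap the CPU-side mappings. */
static void bo_unmap_virtual(struct bo *bo)
{
	bo->mapped = 0;
}

/* TTM's move path zaps mappings *before* invoking the move callback,
 * which is the ordering that previously hid the missing unmap. */
static void bo_move(struct bo *bo)
{
	bo_unmap_virtual(bo);
	bo->ops->move_notify(bo);
}

/* The page-release path had no such zap; the patch adds the guarded
 * dispatch below, mirroring i915_gem_object_release_mmap_offset(). */
static void bo_release_pages(struct bo *bo)
{
	if (bo->ops->unmap_virtual)
		bo->ops->unmap_virtual(bo);
}

static void move_notify(struct bo *bo) { (void)bo; }

static const struct bo_ops ops = {
	.move_notify = move_notify,
	.unmap_virtual = bo_unmap_virtual,
};

int main(void)
{
	struct bo bo = { .mapped = 1, .ops = &ops };

	bo_move(&bo);          /* mapped -> 0 before move_notify runs */
	bo.mapped = 1;
	bo_release_pages(&bo); /* with the patch, mapped -> 0 here too */
	printf("mapped after release: %d\n", bo.mapped);
	return 0;
}

Leaving unmap_virtual NULL in an ops table simply skips the call, so backends that never had CPU mappings to zap are unaffected; that is presumably why the patch makes the op optional rather than mandatory.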
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index ee5ec0fd4807..5ac2506f4ee8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
 {
 	struct i915_mmap_offset *mmo, *mn;

+	if (obj->ops->unmap_virtual)
+		obj->ops->unmap_virtual(obj);
+
 	spin_lock(&obj->mmo.lock);
 	rbtree_postorder_for_each_entry_safe(mmo, mn,
 					     &obj->mmo.offsets, offset) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index f9f7e44099fe..4b4829eb16c2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
 	int (*pwrite)(struct drm_i915_gem_object *obj,
 		      const struct drm_i915_gem_pwrite *arg);
 	u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+	void (*unmap_virtual)(struct drm_i915_gem_object *obj);

 	int (*dmabuf_export)(struct drm_i915_gem_object *obj);

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 8d61d4538a64..1530d9f0bc81 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -950,6 +950,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
 	return drm_vma_node_offset_addr(&obj->base.vma_node);
 }

+static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+{
+	ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+}
+
 static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.name = "i915_gem_object_ttm",
 	.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
@@ -965,6 +970,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
 	.migrate = i915_ttm_migrate,

 	.mmap_offset = i915_ttm_mmap_offset,
+	.unmap_virtual = i915_ttm_unmap_virtual,
 	.mmap_ops = &vm_ops_ttm,
 };

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 743a098facf2..f61356b72b1c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1369,20 +1369,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 		}
 	}

-	if (!obj->ops->mmap_ops) {
-		err = check_absent(addr, obj->base.size);
-		if (err) {
-			pr_err("%s: was not absent\n", obj->mm.region->name);
-			goto out_unmap;
-		}
-	} else {
-		/* ttm allows access to evicted regions by design */
-
-		err = check_present(addr, obj->base.size);
-		if (err) {
-			pr_err("%s: was not present\n", obj->mm.region->name);
-			goto out_unmap;
-		}
+	err = check_absent(addr, obj->base.size);
+	if (err) {
+		pr_err("%s: was not absent\n", obj->mm.region->name);
+		goto out_unmap;
 	}

 out_unmap:
Ensure we call ttm_bo_unmap_virtual when releasing the pages.
Importantly this should now handle the ttm swapping case, and all other
places that already call into i915_ttm_move_notify().

v2: fix up the selftest

Fixes: cf3e3e86d779 ("drm/i915: Use ttm mmap handling for ttm bo's.")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c       |  3 +++
 .../gpu/drm/i915/gem/i915_gem_object_types.h   |  1 +
 drivers/gpu/drm/i915/gem/i915_gem_ttm.c        |  6 ++++++
 .../gpu/drm/i915/gem/selftests/i915_gem_mman.c | 18 ++++--------------
 4 files changed, 14 insertions(+), 14 deletions(-)
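The selftest side of the change follows from the above: once the mmap offset is revoked and the pages released, touching the old user mapping must fault for every backend, TTM included, which is why the TTM-only check_present() branch is dropped. As a rough userspace analogue (hypothetical, and much simpler than the kernel selftest, which probes real GEM object mappings; the 4096-byte page size is also an assumption), the sketch below emulates a check_absent()-style probe with a SIGSEGV handler, using mprotect(PROT_NONE) to stand in for the revoke:

#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

static sigjmp_buf probe_env;

static void segv_handler(int sig)
{
	(void)sig;
	siglongjmp(probe_env, 1);
}

/* Rough analogue of the selftest's check_absent(): return 0 when every
 * page in the range faults on read (mapping revoked), -1 otherwise. */
static int check_absent(volatile char *addr, size_t size)
{
	struct sigaction sa;
	volatile size_t i;

	sa.sa_handler = segv_handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	for (i = 0; i < size; i += 4096) {
		if (sigsetjmp(probe_env, 1) == 0) {
			(void)addr[i];	/* should fault after the revoke */
			return -1;	/* still readable: "was not absent" */
		}
	}
	return 0;
}

int main(void)
{
	size_t size = 2 * 4096;
	char *addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED)
		return EXIT_FAILURE;

	addr[0] = 1;			 /* populate the mapping */
	mprotect(addr, size, PROT_NONE); /* stand-in for the mmap revoke */

	printf("absent after revoke: %s\n",
	       check_absent(addr, size) == 0 ? "yes" : "no");
	munmap(addr, size);
	return 0;
}

Built with a C99 compiler on Linux, this prints "absent after revoke: yes"; dropping the mprotect() call flips it to "no", which corresponds to the failure the selftest reports via pr_err("%s: was not absent\n", ...).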