Message ID | 20211021103605.735002-10-maarten.lankhorst@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | [01/28] drm/i915: Fix i915_request fence wait semantics | expand |
On Thu, 21 Oct 2021 at 11:37, Maarten Lankhorst <maarten.lankhorst@linux.intel.com> wrote: > > Call drop_pages with the gem object lock held, instead of the other > way around. This will allow us to drop the vma bindings with the > gem object lock held. > > We plan to require the object lock for unpinning in the future, > and this is an easy target. > > Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> > Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com> > --- > drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 42 ++++++++++---------- > 1 file changed, 22 insertions(+), 20 deletions(-) > > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c > index af3eb7fd951d..d3f29a66cb36 100644 > --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c > +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c > @@ -36,8 +36,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) > return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; > } > > -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, > - unsigned long shrink, bool trylock_vm) > +static int drop_pages(struct drm_i915_gem_object *obj, > + unsigned long shrink, bool trylock_vm) > { > unsigned long flags; > > @@ -208,26 +208,28 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, > > spin_unlock_irqrestore(&i915->mm.obj_lock, flags); > > - err = 0; > - if (unsafe_drop_pages(obj, shrink, trylock_vm)) { > - /* May arrive from get_pages on another bo */ > - if (!ww) { > - if (!i915_gem_object_trylock(obj)) > - goto skip; > - } else { > - err = i915_gem_object_lock(obj, ww); > - if (err) > - goto skip; > - } > - > - if (!__i915_gem_object_put_pages(obj)) { > - try_to_writeback(obj, shrink); > - count += obj->base.size >> PAGE_SHIFT; > - } > - if (!ww) > - i915_gem_object_unlock(obj); > + /* May arrive from get_pages on another bo */ > + if (!ww) { > + if (!i915_gem_object_trylock(obj)) > + goto skip; > + } else { > + err = i915_gem_object_lock(obj, ww); > + if (err) > + goto skip; > } > > + if (drop_pages(obj, shrink, trylock_vm) && > + !__i915_gem_object_put_pages(obj)) { > + try_to_writeback(obj, shrink); > + count += obj->base.size >> PAGE_SHIFT; > + } > + > + if (dma_resv_test_signaled(obj->base.resv, true)) > + dma_resv_add_excl_fence(obj->base.resv, NULL); I assume we want to rip out resv_prune here in the series, or something? Instead of randomly adding this back here. > + > + if (!ww) > + i915_gem_object_unlock(obj); > + > scanned += obj->base.size >> PAGE_SHIFT; > skip: > i915_gem_object_put(obj); > -- > 2.33.0 >
Op 21-10-2021 om 18:12 schreef Matthew Auld: > On Thu, 21 Oct 2021 at 11:37, Maarten Lankhorst > <maarten.lankhorst@linux.intel.com> wrote: >> Call drop_pages with the gem object lock held, instead of the other >> way around. This will allow us to drop the vma bindings with the >> gem object lock held. >> >> We plan to require the object lock for unpinning in the future, >> and this is an easy target. >> >> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> >> Reviewed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com> >> --- >> drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 42 ++++++++++---------- >> 1 file changed, 22 insertions(+), 20 deletions(-) >> >> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c >> index af3eb7fd951d..d3f29a66cb36 100644 >> --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c >> +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c >> @@ -36,8 +36,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) >> return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; >> } >> >> -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, >> - unsigned long shrink, bool trylock_vm) >> +static int drop_pages(struct drm_i915_gem_object *obj, >> + unsigned long shrink, bool trylock_vm) >> { >> unsigned long flags; >> >> @@ -208,26 +208,28 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, >> >> spin_unlock_irqrestore(&i915->mm.obj_lock, flags); >> >> - err = 0; >> - if (unsafe_drop_pages(obj, shrink, trylock_vm)) { >> - /* May arrive from get_pages on another bo */ >> - if (!ww) { >> - if (!i915_gem_object_trylock(obj)) >> - goto skip; >> - } else { >> - err = i915_gem_object_lock(obj, ww); >> - if (err) >> - goto skip; >> - } >> - >> - if (!__i915_gem_object_put_pages(obj)) { >> - try_to_writeback(obj, shrink); >> - count += obj->base.size >> PAGE_SHIFT; >> - } >> - if (!ww) >> - i915_gem_object_unlock(obj); >> + /* May arrive from get_pages on another bo */ >> + if (!ww) { >> + if (!i915_gem_object_trylock(obj)) >> + goto skip; >> + } else { >> + err = i915_gem_object_lock(obj, ww); >> + if (err) >> + goto skip; >> } >> >> + if (drop_pages(obj, shrink, trylock_vm) && >> + !__i915_gem_object_put_pages(obj)) { >> + try_to_writeback(obj, shrink); >> + count += obj->base.size >> PAGE_SHIFT; >> + } >> + >> + if (dma_resv_test_signaled(obj->base.resv, true)) >> + dma_resv_add_excl_fence(obj->base.resv, NULL); > I assume we want to rip out resv_prune here in the series, or > something? Instead of randomly adding this back here. Oh yeah, this hunk can be removed safely. It's stale and shouldn't be here. :) >> + >> + if (!ww) >> + i915_gem_object_unlock(obj); >> + >> scanned += obj->base.size >> PAGE_SHIFT; >> skip: >> i915_gem_object_put(obj); >> -- >> 2.33.0 >>
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index af3eb7fd951d..d3f29a66cb36 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -36,8 +36,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) return swap_available() || obj->mm.madv == I915_MADV_DONTNEED; } -static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, - unsigned long shrink, bool trylock_vm) +static int drop_pages(struct drm_i915_gem_object *obj, + unsigned long shrink, bool trylock_vm) { unsigned long flags; @@ -208,26 +208,28 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - err = 0; - if (unsafe_drop_pages(obj, shrink, trylock_vm)) { - /* May arrive from get_pages on another bo */ - if (!ww) { - if (!i915_gem_object_trylock(obj)) - goto skip; - } else { - err = i915_gem_object_lock(obj, ww); - if (err) - goto skip; - } - - if (!__i915_gem_object_put_pages(obj)) { - try_to_writeback(obj, shrink); - count += obj->base.size >> PAGE_SHIFT; - } - if (!ww) - i915_gem_object_unlock(obj); + /* May arrive from get_pages on another bo */ + if (!ww) { + if (!i915_gem_object_trylock(obj)) + goto skip; + } else { + err = i915_gem_object_lock(obj, ww); + if (err) + goto skip; } + if (drop_pages(obj, shrink, trylock_vm) && + !__i915_gem_object_put_pages(obj)) { + try_to_writeback(obj, shrink); + count += obj->base.size >> PAGE_SHIFT; + } + + if (dma_resv_test_signaled(obj->base.resv, true)) + dma_resv_add_excl_fence(obj->base.resv, NULL); + + if (!ww) + i915_gem_object_unlock(obj); + scanned += obj->base.size >> PAGE_SHIFT; skip: i915_gem_object_put(obj);