@@ -103,6 +103,7 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	it = interval_tree_iter_first(&mn->objects, range->start, end);
 	while (it) {
 		struct drm_i915_gem_object *obj;
+		unsigned int flags;
 
 		if (!mmu_notifier_range_blockable(range)) {
 			ret = -EAGAIN;
@@ -126,9 +127,12 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		}
 		spin_unlock(&mn->lock);
 
-		ret = i915_gem_object_unbind(obj,
-					     I915_GEM_OBJECT_UNBIND_ACTIVE |
-					     I915_GEM_OBJECT_UNBIND_BARRIER);
+		flags = (I915_GEM_OBJECT_UNBIND_ACTIVE |
+			 I915_GEM_OBJECT_UNBIND_BARRIER);
+		if (mmu_notifier_range_mayfail(range))
+			flags = 0;
+
+		ret = i915_gem_object_unbind(obj, flags);
 		if (ret == 0)
 			ret = __i915_gem_object_put_pages(obj);
 		i915_gem_object_put(obj);
The direct reclaim path may trigger the mmu_notifier callback as part
of try_to_unmap_one. As this is purely an opportunistic attempt to
reclaim pages, and it may be called from any allocation context under
unknown conditions (including while attempting to allocate pages for
the userptr object itself, and then trying to reclaim parts of the
partially acquired object), we must be careful never to wait on
anything held by the calling context. Since that context is unknown,
we have to avoid waiting from inside direct reclaim.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
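
A note on the new predicate: mmu_notifier_range_mayfail() is not an
existing mainline helper, so this patch presumably depends on a
corresponding mm change (not shown here) that lets opportunistic
callers such as try_to_unmap_one() mark their invalidation as allowed
to fail. Below is a minimal sketch of the semantics the i915 side
appears to assume, modeled on the existing mmu_notifier_range_blockable()
helper in include/linux/mmu_notifier.h; the MMU_NOTIFIER_RANGE_MAYFAIL
flag name and its bit value are assumptions:

/*
 * Hypothetical sketch, not mainline: the flag name and bit value are
 * assumptions, modeled on MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0) in
 * include/linux/mmu_notifier.h.
 */
#define MMU_NOTIFIER_RANGE_MAYFAIL	(1 << 1)

/*
 * True if the caller treats this invalidation as opportunistic (e.g.
 * direct reclaim via try_to_unmap_one) and can tolerate the notifier
 * declining to wait.
 */
static inline bool
mmu_notifier_range_mayfail(const struct mmu_notifier_range *range)
{
	return range->flags & MMU_NOTIFIER_RANGE_MAYFAIL;
}

The intended effect on the i915 side: when the invalidation is allowed
to fail, the unbind is attempted without the ACTIVE/BARRIER flags, so
it should refuse to wait on still-busy VMA (failing with an error such
as -EBUSY) rather than blocking a reclaim context that might hold the
very locks or pages the wait would depend on.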