@@ -112,7 +112,10 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
static void clear_vm_list(struct list_head *list)
{
struct i915_vma *vma, *vn;
+ bool unlocked;
+restart:
+ unlocked = false;
list_for_each_entry_safe(vma, vn, list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -138,8 +141,22 @@ static void clear_vm_list(struct list_head *list)
i915_vm_resv_get(vma->vm);
vma->vm_ddestroy = true;
} else {
+ if (!i915_gem_object_trylock(obj, NULL)) {
+ unlocked = true;
+ mutex_unlock(&vma->vm->mutex);
+ i915_gem_object_lock(obj, NULL);
+ mutex_lock(&vma->vm->mutex);
+ /*
+ * The vma may now be on a different list,
+ * but it is still not destroyed. We don't
+ * care; destroy it anyway.
+ */
+ }
i915_vma_destroy_locked(vma);
+ i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
+ if (unlocked)
+ goto restart;
}
}
Currently we guarantee that vmas stay alive when the object lock is held only if we also hold a private vm reference. In order to relax the latter requirement, take the object lock also when destroying vmas from the vm destruction path. Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> --- drivers/gpu/drm/i915/gt/intel_gtt.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+)