@@ -19,6 +19,7 @@
#include "i915_gem_vm_bind.h"
#include "i915_trace.h"
+#define __EXEC3_USERPTR_USED BIT_ULL(34)
#define __EXEC3_HAS_PIN BIT_ULL(33)
#define __EXEC3_ENGINE_PINNED BIT_ULL(32)
#define __EXEC3_INTERNAL_FLAGS (~0ull << 32)
@@ -141,6 +142,21 @@ static void eb_scoop_unbound_vma_all(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
+#ifdef CONFIG_MMU_NOTIFIER
+ /*
+ * Move all invalidated userptr vmas back into vm_bind_list so that
+ * they are looked up and revalidated.
+ */
+ spin_lock(&vm->userptr_invalidated_lock);
+ list_for_each_entry_safe(vma, vn, &vm->userptr_invalidated_list,
+ userptr_invalidated_link) {
+ list_del_init(&vma->userptr_invalidated_link);
+ if (!list_empty(&vma->vm_bind_link))
+ list_move_tail(&vma->vm_bind_link, &vm->vm_bind_list);
+ }
+ spin_unlock(&vm->userptr_invalidated_lock);
+#endif
+
/**
* Move all unbound vmas back into vm_bind_list so that they are
* revalidated.
@@ -154,10 +170,47 @@ static void eb_scoop_unbound_vma_all(struct i915_address_space *vm)
spin_unlock(&vm->vm_rebind_lock);
}
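+/*
+ * Prepare all persistent userptr vmas on the vm_bind_list by calling
+ * submit_init() to pin their backing pages, and flag the execbuf so
+ * that eb_move_to_gpu() can complete the submission with submit_done().
+ */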
+static int eb_lookup_persistent_userptr_vmas(struct i915_execbuffer *eb)
+{
+ struct i915_address_space *vm = eb->context->vm;
+ struct i915_vma *last_vma = NULL;
+ struct i915_vma *vma;
+ int err;
+
+ lockdep_assert_held(&vm->vm_bind_lock);
+
+ list_for_each_entry(vma, &vm->vm_bind_list, vm_bind_link) {
+ if (!i915_gem_object_is_userptr(vma->obj))
+ continue;
+
+ err = i915_gem_object_userptr_submit_init(vma->obj);
+ if (err)
+ return err;
+
+ /*
+ * The submit_init() call above unbinds the object, which adds
+ * the vma to vm_rebind_list. Remove it from that list as it has
+ * already been scooped for revalidation.
+ */
+ spin_lock(&vm->vm_rebind_lock);
+ if (!list_empty(&vma->vm_rebind_link))
+ list_del_init(&vma->vm_rebind_link);
+ spin_unlock(&vm->vm_rebind_lock);
+
+ last_vma = vma;
+ }
+
+ if (last_vma)
+ eb->args->flags |= __EXEC3_USERPTR_USED;
+
+ return 0;
+}
+
static int eb_lookup_vma_all(struct i915_execbuffer *eb)
{
unsigned int i, current_batch = 0;
struct i915_vma *vma;
+ int err = 0;
for (i = 0; i < eb->num_batches; i++) {
vma = eb_find_vma(eb->context->vm, eb->batch_addresses[i]);
@@ -170,6 +223,10 @@ static int eb_lookup_vma_all(struct i915_execbuffer *eb)
eb_scoop_unbound_vma_all(eb->context->vm);
+ err = eb_lookup_persistent_userptr_vmas(eb);
+ if (err)
+ return err;
+
return 0;
}
@@ -349,6 +406,29 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
}
}
+#ifdef CONFIG_MMU_NOTIFIER
+ /* Check for further userptr invalidations */
+ spin_lock(&vm->userptr_invalidated_lock);
+ if (!list_empty(&vm->userptr_invalidated_list))
+ err = -EAGAIN;
+ spin_unlock(&vm->userptr_invalidated_lock);
+
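+ /*
+ * submit_done() under the notifier read lock checks that the pages
+ * pinned by submit_init() are still valid; it fails with -EAGAIN if
+ * they were invalidated in the meantime.
+ */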
+ if (!err && (eb->args->flags & __EXEC3_USERPTR_USED)) {
+ read_lock(&eb->i915->mm.notifier_lock);
+ list_for_each_entry(vma, &vm->vm_bind_list, vm_bind_link) {
+ if (!i915_gem_object_is_userptr(vma->obj))
+ continue;
+
+ err = i915_gem_object_userptr_submit_done(vma->obj);
+ if (err)
+ break;
+ }
+ read_unlock(&eb->i915->mm.notifier_lock);
+ }
+#endif
+ if (unlikely(err))
+ goto err_skip;
+
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->gt);
@@ -63,6 +63,7 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
{
struct drm_i915_gem_object *obj = container_of(mni, struct drm_i915_gem_object, userptr.notifier);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_vma *vma;
long r;
if (!mmu_notifier_range_blockable(range))
@@ -85,6 +86,22 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
if (current->flags & PF_EXITING)
return true;
+ /*
+ * Add persistent vmas to the userptr_invalidated list so that they
+ * are looked up and revalidated on the next submission.
+ */
+ spin_lock(&obj->vma.lock);
+ list_for_each_entry(vma, &obj->vma.list, obj_link) {
+ if (!i915_vma_is_persistent(vma))
+ continue;
+
+ spin_lock(&vma->vm->userptr_invalidated_lock);
+ list_add_tail(&vma->userptr_invalidated_link,
+ &vma->vm->userptr_invalidated_list);
+ spin_unlock(&vma->vm->userptr_invalidated_lock);
+ }
+ spin_unlock(&obj->vma.lock);
+
/* we will unbind on next submission, still have userptr pins */
r = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
MAX_SCHEDULE_TIMEOUT);
@@ -298,6 +298,12 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
goto put_obj;
}
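+ /* Pin the backing pages of a userptr object before binding it */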
+ if (i915_gem_object_is_userptr(obj)) {
+ ret = i915_gem_object_userptr_submit_init(obj);
+ if (ret)
+ goto put_obj;
+ }
+
ret = mutex_lock_interruptible(&vm->vm_bind_lock);
if (ret)
goto put_obj;
@@ -329,6 +335,15 @@ static int i915_gem_vm_bind_obj(struct i915_address_space *vm,
/* Make it evictable */
__i915_vma_unpin(vma);
+#ifdef CONFIG_MMU_NOTIFIER
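+ /*
+ * submit_done() fails with -EAGAIN if the userptr pages were
+ * invalidated since submit_init() above; 'continue' hands the
+ * error to the enclosing ww transaction loop.
+ */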
+ if (i915_gem_object_is_userptr(obj)) {
+ read_lock(&vm->i915->mm.notifier_lock);
+ ret = i915_gem_object_userptr_submit_done(obj);
+ read_unlock(&vm->i915->mm.notifier_lock);
+ if (ret)
+ continue;
+ }
+#endif
/*
* Add it to vm_bind_list so that the next execbuf waits for
* bind to complete.
@@ -298,6 +298,8 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
GEM_BUG_ON(IS_ERR(vm->root_obj));
INIT_LIST_HEAD(&vm->vm_rebind_list);
spin_lock_init(&vm->vm_rebind_lock);
+ spin_lock_init(&vm->userptr_invalidated_lock);
+ INIT_LIST_HEAD(&vm->userptr_invalidated_list);
}
void *__px_vaddr(struct drm_i915_gem_object *p)
@@ -269,6 +269,10 @@ struct i915_address_space {
struct list_head vm_rebind_list;
/* @vm_rebind_lock: protects vm_rebind_list */
spinlock_t vm_rebind_lock;
+ /* @userptr_invalidated_list: list of invalidated userptr vmas */
+ struct list_head userptr_invalidated_list;
+ /* @userptr_invalidated_lock: protects userptr_invalidated_list */
+ spinlock_t userptr_invalidated_lock;
/* @va: tree of persistent vmas */
struct rb_root_cached va;
struct list_head non_priv_vm_bind_list;
@@ -307,6 +307,8 @@ struct i915_vma {
struct list_head non_priv_vm_bind_link;
/* @vm_rebind_link: link to vm_rebind_list and protected by vm_rebind_lock */
struct list_head vm_rebind_link; /* Link in vm_rebind_list */
+ /* @userptr_invalidated_link: link to vm->userptr_invalidated_list */
+ struct list_head userptr_invalidated_link;
/** Timeline fence for vm_bind completion notification */
struct {