@@ -4,6 +4,7 @@
*/
#include <linux/dma-resv.h>
+#include <linux/lockdep.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>
@@ -22,6 +23,7 @@
#include "i915_gem_vm_bind.h"
#include "i915_trace.h"
+#define __EXEC3_HAS_PIN BIT_ULL(33)
#define __EXEC3_ENGINE_PINNED BIT_ULL(32)
#define __EXEC3_INTERNAL_FLAGS (~0ull << 32)
@@ -45,7 +47,9 @@
* execlist. Hence, no support for implicit sync.
*
* The new execbuf3 ioctl only works in VM_BIND mode and the VM_BIND mode only
- * works with execbuf3 ioctl for submission.
+ * works with the execbuf3 ioctl for submission. All BOs mapped on that VM
+ * (through VM_BIND calls) at the time of the execbuf3 call are deemed
+ * required for that submission.
*
* The execbuf3 ioctl directly specifies the batch addresses instead of as
* object handles as in execbuf2 ioctl. The execbuf3 ioctl will also not
@@ -61,6 +65,13 @@
* So, a lot of code supporting execbuf2 ioctl, like relocations, VA evictions,
* vma lookup table, implicit sync, vma active reference tracking etc., are not
* applicable for execbuf3 ioctl.
+ *
+ * During each execbuf submission, the request fence is added to all VM_BIND
+ * mapped objects with DMA_RESV_USAGE_BOOKKEEP usage, which prevents
+ * over-synchronization (see enum dma_resv_usage). Note that the
+ * DRM_I915_GEM_WAIT and DRM_I915_GEM_BUSY ioctls do not check for
+ * DMA_RESV_USAGE_BOOKKEEP fences and hence should not be used for an
+ * end-of-batch completion check. The execbuf3 timeline out fence should be
+ * used for that instead.
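+ *
+ * As an illustrative sketch only (uapi names as proposed by this series,
+ * not authoritative), an end-of-batch wait from userspace could look like:
+ *
+ * struct drm_i915_gem_timeline_fence out_fence = {
+ * .handle = syncobj_handle,
+ * .flags = I915_TIMELINE_FENCE_SIGNAL,
+ * .value = point,
+ * };
+ * struct drm_i915_gem_execbuffer3 execbuf = {
+ * .ctx_id = ctx_id,
+ * .batch_address = batch_va,
+ * .fence_count = 1,
+ * .timeline_fences = (__u64)(uintptr_t)&out_fence,
+ * };
+ *
+ * ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &execbuf);
+ * drmSyncobjTimelineWait(fd, &syncobj_handle, &point, 1, INT64_MAX, 0, NULL);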
*/
struct eb_fence {
@@ -108,6 +119,7 @@ struct i915_execbuffer {
};
static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
+static void eb_unpin_engine(struct i915_execbuffer *eb);
static int eb_select_context(struct i915_execbuffer *eb)
{
@@ -132,6 +144,19 @@ eb_find_vma(struct i915_address_space *vm, u64 addr)
return i915_gem_vm_bind_lookup_vma(vm, va);
}
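+/*
+ * Scoop up all VMAs that got unbound/evicted since the previous execbuf
+ * (parked on the vm_rebind_list) back onto the vm_bind_list so that they
+ * are revalidated and rebound as part of this submission.
+ */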
+static void eb_scoop_unbound_vma_all(struct i915_address_space *vm)
+{
+ struct i915_vma *vma, *vn;
+
+ spin_lock(&vm->vm_rebind_lock);
+ list_for_each_entry_safe(vma, vn, &vm->vm_rebind_list, vm_rebind_link) {
+ list_del_init(&vma->vm_rebind_link);
+ if (!list_empty(&vma->vm_bind_link))
+ list_move_tail(&vma->vm_bind_link, &vm->vm_bind_list);
+ }
+ spin_unlock(&vm->vm_rebind_lock);
+}
+
static int eb_lookup_vma_all(struct i915_execbuffer *eb)
{
unsigned int i, current_batch = 0;
@@ -146,11 +171,119 @@ static int eb_lookup_vma_all(struct i915_execbuffer *eb)
++current_batch;
}
+ eb_scoop_unbound_vma_all(eb->context->vm);
+
return 0;
}
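+/*
+ * Lock all persistent BOs under the ww context: the VM's root_obj, whose
+ * dma-resv is shared by all VM private BOs, plus each non-private BO that
+ * is VM_BIND mapped on this VM.
+ */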
+static int eb_lock_vma_all(struct i915_execbuffer *eb)
+{
+ struct i915_address_space *vm = eb->context->vm;
+ struct i915_vma *vma;
+ int err;
+
+ err = i915_gem_object_lock(vm->root_obj, &eb->ww);
+ if (err)
+ return err;
+
+ list_for_each_entry(vma, &vm->non_priv_vm_bind_list,
+ non_priv_vm_bind_link) {
+ err = i915_gem_object_lock(vma->obj, &eb->ww);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
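+/*
+ * Release the pinning of persistent VMAs done during validation, as
+ * tracked by the __EXEC3_HAS_PIN flag.
+ */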
+static void eb_release_persistent_vma_all(struct i915_execbuffer *eb,
+ bool final)
+{
+ struct i915_address_space *vm = eb->context->vm;
+ struct i915_vma *vma, *vn;
+
+ lockdep_assert_held(&vm->vm_bind_lock);
+
+ if (!(eb->args->flags & __EXEC3_HAS_PIN))
+ return;
+
+ assert_object_held(vm->root_obj);
+
+ list_for_each_entry(vma, &vm->vm_bind_list, vm_bind_link)
+ __i915_vma_unpin(vma);
+
+ eb->args->flags &= ~__EXEC3_HAS_PIN;
+ if (!final)
+ return;
+
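+ /*
+ * On the final release, move fully bound VMAs to the vm_bound_list so
+ * they are skipped during validation of subsequent execbufs.
+ */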
+ list_for_each_entry_safe(vma, vn, &vm->vm_bind_list, vm_bind_link)
+ if (i915_vma_is_bind_complete(vma))
+ list_move_tail(&vma->vm_bind_link, &vm->vm_bound_list);
+}
+
static void eb_release_vma_all(struct i915_execbuffer *eb, bool final)
{
+ eb_release_persistent_vma_all(eb, final);
+ eb_unpin_engine(eb);
+}
+
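+/*
+ * Reserve one fence slot on the shared root_obj dma-resv and on each
+ * non-private BO's dma-resv, so that adding the request fence after
+ * submission cannot fail.
+ */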
+static int eb_reserve_fence_for_persistent_vma_all(struct i915_execbuffer *eb)
+{
+ struct i915_address_space *vm = eb->context->vm;
+ struct i915_vma *vma;
+ int ret;
+
+ ret = dma_resv_reserve_fences(vm->root_obj->base.resv, 1);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(vma, &vm->non_priv_vm_bind_list,
+ non_priv_vm_bind_link) {
+ ret = dma_resv_reserve_fences(vma->obj->base.resv, 1);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
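+/*
+ * Validate (pin) all persistent VMAs on the vm_bind_list at their fixed
+ * VM_BIND addresses. On success, __EXEC3_HAS_PIN is set so that the pins
+ * are dropped by eb_release_persistent_vma_all().
+ */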
+static int eb_validate_persistent_vma_all(struct i915_execbuffer *eb)
+{
+ struct i915_address_space *vm = eb->context->vm;
+ struct i915_vma *vma, *last_pinned_vma = NULL;
+ int ret = 0;
+
+ lockdep_assert_held(&vm->vm_bind_lock);
+ assert_object_held(vm->root_obj);
+
+ ret = eb_reserve_fence_for_persistent_vma_all(eb);
+ if (ret)
+ return ret;
+
+ if (list_empty(&vm->vm_bind_list))
+ return 0;
+
+ list_for_each_entry(vma, &vm->vm_bind_list, vm_bind_link) {
+ u64 pin_flags = vma->start | PIN_OFFSET_FIXED | PIN_USER;
+
+ ret = i915_vma_pin_ww(vma, &eb->ww, 0, 0, pin_flags);
+ if (ret)
+ break;
+
+ last_pinned_vma = vma;
+ }
+
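+ /* On error, unwind the pins taken so far, up to last_pinned_vma. */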
+ if (ret && last_pinned_vma) {
+ list_for_each_entry(vma, &vm->vm_bind_list, vm_bind_link) {
+ __i915_vma_unpin(vma);
+ if (vma == last_pinned_vma)
+ break;
+ }
+ } else if (last_pinned_vma) {
+ eb->args->flags |= __EXEC3_HAS_PIN;
+ }
+
+ return ret;
}
static int eb_validate_vma_all(struct i915_execbuffer *eb)
@@ -160,6 +293,12 @@ static int eb_validate_vma_all(struct i915_execbuffer *eb)
int err;
err = eb_pin_engine(eb, throttle);
+ if (!err)
+ err = eb_lock_vma_all(eb);
+
+ if (!err)
+ err = eb_validate_persistent_vma_all(eb);
+
if (!err)
return 0;
@@ -189,8 +328,43 @@ static int eb_validate_vma_all(struct i915_execbuffer *eb)
BUILD_BUG_ON(!typecheck(int, _i)); \
for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i))
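+/*
+ * Add the request fence to the BO's dma-resv with DMA_RESV_USAGE_BOOKKEEP
+ * (no implicit sync dependency is created) and mark the object as GPU
+ * accessed and dirty.
+ */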
+static void __eb_persistent_add_shared_fence(struct drm_i915_gem_object *obj,
+ struct dma_fence *fence)
+{
+ dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_BOOKKEEP);
+ obj->write_domain = 0;
+ obj->read_domains |= I915_GEM_GPU_DOMAINS;
+ obj->mm.dirty = true;
+}
+
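+/*
+ * Attach the request fence (the composite fence for parallel submission)
+ * to the shared root_obj dma-resv and to every non-private BO on this VM.
+ */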
+static void eb_persistent_add_shared_fence(struct i915_execbuffer *eb)
+{
+ struct i915_address_space *vm = eb->context->vm;
+ struct dma_fence *fence;
+ struct i915_vma *vma;
+
+ fence = eb->composite_fence ? eb->composite_fence :
+ &eb->requests[0]->fence;
+
+ __eb_persistent_add_shared_fence(vm->root_obj, fence);
+ list_for_each_entry(vma, &vm->non_priv_vm_bind_list,
+ non_priv_vm_bind_link)
+ __eb_persistent_add_shared_fence(vma->obj, fence);
+}
+
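+/*
+ * Persistent VMAs do not use per-vma active reference tracking; adding the
+ * request fence to the BOs' dma-resv (BOOKKEEP usage) stands in for it.
+ */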
+static void eb_move_all_persistent_vma_to_active(struct i915_execbuffer *eb)
+{
+ /* Add fence to BOs dma-resv fence list */
+ eb_persistent_add_shared_fence(eb);
+}
+
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
+ lockdep_assert_held(&eb->context->vm->vm_bind_lock);
+ assert_object_held(eb->context->vm->root_obj);
+
+ eb_move_all_persistent_vma_to_active(eb);
+
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->gt);
@@ -381,6 +555,30 @@ static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
return err;
}
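+/* Undo eb_pin_engine(): exit and unpin the context and any child contexts. */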
+static void eb_unpin_engine(struct i915_execbuffer *eb)
+{
+ struct intel_context *ce = eb->context, *child;
+
+ if (!(eb->args->flags & __EXEC3_ENGINE_PINNED))
+ return;
+
+ eb->args->flags &= ~__EXEC3_ENGINE_PINNED;
+
+ for_each_child(ce, child) {
+ mutex_lock(&child->timeline->mutex);
+ intel_context_exit(child);
+ mutex_unlock(&child->timeline->mutex);
+
+ intel_context_unpin(child);
+ }
+
+ mutex_lock(&ce->timeline->mutex);
+ intel_context_exit(ce);
+ mutex_unlock(&ce->timeline->mutex);
+
+ intel_context_unpin(ce);
+}
+
static int
eb_select_engine(struct i915_execbuffer *eb)
{