@@ -503,23 +503,6 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }
 
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
-	struct intel_engine_cs *engine = NULL;
-	struct dma_fence *fence;
-
-	rcu_read_lock();
-	fence = dma_resv_get_excl_unlocked(obj->base.resv);
-	rcu_read_unlock();
-
-	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
-		engine = to_request(fence)->engine;
-	dma_fence_put(fence);
-
-	return engine;
-}
-
 void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
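The removed helper's only caller was debugfs, so it moves out of the header and next to that caller in the following hunks, gaining a device check along the way. For reference, a minimal sketch of the unlocked fence-peek pattern the helper is built on, using only the dma-buf core APIs already visible in this diff (the helper name is hypothetical, not part of the patch):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static bool object_has_active_writer(struct dma_resv *resv)
{
	struct dma_fence *fence;
	bool active;

	/* dma_resv_get_excl_unlocked() returns a referenced fence or
	 * NULL; dma_fence_put(NULL) is a no-op, so the reference can
	 * be dropped unconditionally.
	 */
	fence = dma_resv_get_excl_unlocked(resv);
	active = fence && !dma_fence_is_signaled(fence);
	dma_fence_put(fence);

	return active;
}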
@@ -135,6 +135,27 @@ static const char *stringify_vma_type(const struct i915_vma *vma)
return "ppgtt";
}
+static struct intel_engine_cs *
+last_write_engine(struct drm_i915_private *i915,
+		  struct drm_i915_gem_object *obj)
+{
+	struct intel_engine_cs *engine = NULL;
+	struct dma_fence *fence;
+
+	rcu_read_lock();
+	fence = dma_resv_get_excl_unlocked(obj->base.resv);
+	rcu_read_unlock();
+
+	if (fence &&
+	    !dma_fence_is_signaled(fence) &&
+	    dma_fence_is_i915(fence) &&
+	    to_request(fence)->i915 == i915)
+		engine = to_request(fence)->engine;
+	dma_fence_put(fence);
+
+	return engine;
+}
+
 void
 i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
@@ -230,7 +251,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (i915_gem_object_is_framebuffer(obj))
 		seq_printf(m, " (fb)");
 
-	engine = i915_gem_object_last_write_engine(obj);
+	engine = last_write_engine(dev_priv, obj);
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
 }
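The call site now threads dev_priv through so the helper can reject fences left in the reservation object by a different i915 instance, instead of printing a bogus engine name. Note that the to_request() downcast in the helper is only legal once dma_fence_is_i915() has vetted the fence ops; it is the usual container_of() idiom over the embedded fence shown in the final hunk. A sketch of that idiom, assuming i915's standard definition:

static inline struct i915_request *to_request(struct dma_fence *fence)
{
	/* Only valid after dma_fence_is_i915(fence): the fence must be
	 * the one embedded in struct i915_request.
	 */
	return container_of(fence, struct i915_request, fence);
}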
@@ -900,6 +900,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	 * hold the intel_context reference. In execlist mode the request always
 	 * eventually points to a physical engine so this isn't an issue.
 	 */
+	rq->i915 = tl->gt->i915;
 	rq->context = intel_context_get(ce);
 	rq->engine = ce->engine;
 	rq->ring = ce->ring;
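Caching the device at request creation sidesteps the instability the comment above describes: rq->engine may point at a virtual engine that is only later resolved to a physical one, so it is not a stable way back to the device, while the timeline's GT is fixed at creation. The snapshotted chain, assuming the standard i915 ownership layout:

/*
 * tl (struct intel_timeline) -> tl->gt       (struct intel_gt)
 * tl->gt                     -> tl->gt->i915 (struct drm_i915_private)
 *
 * Both links are set when the timeline is created and never change,
 * so rq->i915 is a stable snapshot for the request's whole lifetime.
 */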
@@ -1160,6 +1161,9 @@ emit_semaphore_wait(struct i915_request *to,
 	const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
 	struct i915_sw_fence *wait = &to->submit;
 
+	if (to->i915 != from->i915)
+		goto await_fence;
+
 	if (!intel_context_use_semaphores(to->context))
 		goto await_fence;
 
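The device comparison is deliberately the first bail-out in emit_semaphore_wait(): a ring semaphore makes the waiter poll the signaler's breadcrumb through an address only the owning device can resolve, so requests from different i915 instances must never reach the emission code. Everything that jumps to await_fence takes the software path instead; a sketch of that fallback, assuming the label keeps its usual body:

/*
 * await_fence:
 *	return i915_sw_fence_await_dma_fence(&to->submit, &from->fence,
 *					     0, I915_FENCE_GFP);
 *
 * i.e. a CPU-side wait that holds back submission of 'to' until
 * 'from' has signalled; slower than a ring semaphore, but correct
 * across devices.
 */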
@@ -1263,7 +1267,8 @@ __i915_request_await_execution(struct i915_request *to,
 	 * immediate execution, and so we must wait until it reaches the
 	 * active slot.
 	 */
-	if (intel_engine_has_semaphores(to->engine) &&
+	if (to->i915 == from->i915 &&
+	    intel_engine_has_semaphores(to->engine) &&
 	    !i915_request_has_initial_breadcrumb(to)) {
 		err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
 		if (err < 0)
@@ -163,6 +163,7 @@ enum {
  */
 struct i915_request {
 	struct dma_fence fence;
+	struct drm_i915_private *i915;
 	spinlock_t lock;
 
 	/**
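With the back-pointer in place, any code that holds only a request can reach the device without going through rq->engine. A hypothetical usage sketch (the helper name and message are illustrative; the drm_dbg() call and the embedded drm member are standard i915 idioms):

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_request.h"

/* Hypothetical helper: identify a request's device from a context,
 * such as a fence callback, where only the request is available.
 * rq->i915 is a stable snapshot; rq->engine may still be virtual.
 */
static void i915_request_dbg(const struct i915_request *rq)
{
	drm_dbg(&rq->i915->drm, "request %llx:%lld\n",
		rq->fence.context, rq->fence.seqno);
}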