@@ -2390,6 +2390,7 @@ await_fence_array(struct i915_execbuffer *eb)
for (n = 0; n < eb->n_fences; n++) {
struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
struct dma_fence *fence;
unsigned int flags;
@@ -2410,7 +2411,43 @@ await_fence_array(struct i915_execbuffer *eb)
continue;
}
- err = i915_request_await_dma_fence(eb->request, fence);
+ chain = to_dma_fence_chain(fence);
+ if (chain) {
+ struct dma_fence *iter;
+
+ /*
+ * If we're dealing with a dma-fence-chain, peel the
+ * chain by adding all of the unsignaled fences the
+ * chain points to (dma_fence_chain_for_each iterates
+ * those for us).
+ *
+ * This enables us to identify waits on i915 fences
+ * and allows for faster engine-to-engine
+ * synchronization using HW semaphores.
+ */
+ dma_fence_chain_for_each(iter, fence) {
+ struct dma_fence_chain *iter_chain =
+ to_dma_fence_chain(iter);
+
+ /*
+ * It is possible that the last item in the
+ * chain is not a dma_fence_chain.
+ */
+ if (iter_chain) {
+ err = i915_request_await_dma_fence(eb->request,
+ iter_chain->fence);
+ } else {
+ err = i915_request_await_dma_fence(eb->request, iter);
+ }
+ if (err < 0) {
+ dma_fence_put(iter);
+ break;
+ }
+ }
+ } else {
+ err = i915_request_await_dma_fence(eb->request, fence);
+ }
+
dma_fence_put(fence);
if (err < 0)
return err;