[29/53] drm/i915/bdw: Emission of requests with logical rings

Message ID 1402673891-14618-30-git-send-email-oscar.mateo@intel.com (mailing list archive)
State New, archived

Commit Message

oscar.mateo@intel.com June 13, 2014, 3:37 p.m. UTC
From: Oscar Mateo <oscar.mateo@intel.com>

Also known as __i915_add_request's evil twin.

On seqno preallocation, we record which context the request
belongs to, so that we can retrieve it both when we want to
emit the request and when we want to retire it.

This is a candidate to be abstracted away (so that it replaces
__i915_add_request seamlessly).
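
For illustration, a minimal sketch of the lookup this enables (not
part of the patch; the helper request_to_ringbuf is made up): once
the context is recorded in the request, both the emission and the
retirement paths can recover the correct ringbuffer.

static struct intel_ringbuffer *
request_to_ringbuf(struct drm_i915_gem_request *request,
		   struct intel_engine_cs *ring)
{
	/* With execlists, the ringbuffer lives in the request's
	 * context; legacy submission keeps using the engine's
	 * default buffer. This mirrors the check added to
	 * i915_gem_retire_requests_ring() below.
	 */
	if (intel_enable_execlists(ring->dev))
		return request->ctx->engine[ring->id].ringbuf;

	return ring->buffer;
}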

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c  |  18 +++++-
 drivers/gpu/drm/i915/intel_lrc.c | 116 ++++++++++++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/intel_lrc.h |   4 ++
 3 files changed, 135 insertions(+), 3 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dcdffab..69db71a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2320,6 +2320,9 @@  int __i915_add_request(struct intel_engine_cs *ring,
 	u32 request_ring_position, request_start;
 	int ret;
 
+	if (intel_enable_execlists(ring->dev))
+		return intel_logical_ring_add_request(ring, file, obj, out_seqno);
+
 	request_start = intel_ring_get_tail(ring->buffer);
 	/*
 	 * Emit any outstanding flushes - execbuf can fail to emit the flush
@@ -2620,6 +2623,7 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
+		struct intel_ringbuffer *ringbuf;
 
 		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
@@ -2629,12 +2633,24 @@  i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 			break;
 
 		trace_i915_gem_request_retire(ring, request->seqno);
+
+		/* This is one of the few common intersection points
+		 * between legacy ringbuffer submission and execlists:
+		 * we need to tell them apart in order to find the correct
+		 * ringbuffer to which the request belongs.
+		 */
+		if (intel_enable_execlists(ring->dev)) {
+			struct intel_context *ctx = request->ctx;
+			ringbuf = ctx->engine[ring->id].ringbuf;
+		} else
+			ringbuf = ring->buffer;
+
 		/* We know the GPU must have read the request to have
 		 * sent us the seqno + interrupt, so use the position
 		 * of tail of the request to update the last known position
 		 * of the GPU head.
 		 */
-		ring->buffer->last_retired_head = request->tail;
+		ringbuf->last_retired_head = request->tail;
 
 		i915_gem_free_request(request);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3d7fcd6..051e150 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -92,6 +92,113 @@  logical_ringbuf_get(struct intel_engine_cs *ring, struct intel_context *ctx)
 	return ctx->engine[ring->id].ringbuf;
 }
 
+static int logical_ring_flush_all_caches(struct intel_engine_cs *ring,
+					 struct intel_context *ctx)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->emit_flush(ring, ctx, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int intel_logical_ring_add_request(struct intel_engine_cs *ring,
+				   struct drm_file *file,
+				   struct drm_i915_gem_object *obj,
+				   u32 *out_seqno)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct intel_context *ctx;
+	struct intel_ringbuffer *ringbuf;
+	struct drm_i915_gem_request *request;
+	u32 request_ring_position, request_start;
+	int ret;
+
+	request = ring->preallocated_lazy_request;
+	if (WARN_ON(request == NULL))
+		return -ENOMEM;
+
+	/* We pre-recorded which context the request belongs to */
+	ctx = request->ctx;
+	if (WARN_ON(ctx == NULL))
+		return -EINVAL;
+	ringbuf = logical_ringbuf_get(ring, ctx);
+
+	request_start = intel_ring_get_tail(ringbuf);
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	ret = logical_ring_flush_all_caches(ring, ctx);
+	if (ret)
+		return ret;
+
+	/* Record the position of the start of the request so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the head.
+	 */
+	request_ring_position = intel_ring_get_tail(ringbuf);
+
+	ret = ring->emit_request(ring, ctx);
+	if (ret)
+		return ret;
+
+	request->seqno = intel_ring_get_seqno(ring);
+	request->ring = ring;
+	request->head = request_start;
+	request->tail = request_ring_position;
+
+	/* Whilst this request exists, batch_obj will be on the
+	 * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the batch_obj be moved onto the
+	 * inactive_list and lose its active reference. Hence we do not need
+	 * to explicitly hold another reference here.
+	 */
+	request->batch_obj = obj;
+
+	request->emitted_jiffies = jiffies;
+	list_add_tail(&request->list, &ring->request_list);
+	request->file_priv = NULL;
+
+	if (file) {
+		struct drm_i915_file_private *file_priv = file->driver_priv;
+		WARN_ON(file_priv != ctx->file_priv);
+
+		spin_lock(&file_priv->mm.lock);
+		request->file_priv = file_priv;
+		list_add_tail(&request->client_list,
+			      &file_priv->mm.request_list);
+		spin_unlock(&file_priv->mm.lock);
+	}
+
+	ring->outstanding_lazy_seqno = 0;
+	ring->preallocated_lazy_request = NULL;
+
+	if (!dev_priv->ums.mm_suspended) {
+		i915_queue_hangcheck(ring->dev);
+
+		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->mm.retire_work,
+				   round_jiffies_up_relative(HZ));
+		intel_mark_busy(dev_priv->dev);
+	}
+
+	if (out_seqno)
+		*out_seqno = request->seqno;
+	return 0;
+}
+
 void intel_logical_ring_advance_and_submit(struct intel_engine_cs *ring,
 					   struct intel_context *ctx)
 {
@@ -118,6 +225,13 @@  static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
 		if (request == NULL)
 			return -ENOMEM;
 
+		/* Hold a reference to the context this request belongs to
+		 * (we will need it when the time comes to emit/retire the
+		 * request).
+		 */
+		request->ctx = ctx;
+		i915_gem_context_reference(request->ctx);
+
 		ring->preallocated_lazy_request = request;
 	}
 
@@ -157,8 +271,6 @@  static int logical_ring_wait_request(struct intel_engine_cs *ring,
 	if (ret)
 		return ret;
 
-	/* TODO: make sure we update the right ringbuffer's last_retired_head
-	 * when retiring requests */
 	i915_gem_retire_requests_ring(ring);
 	ringbuf->head = ringbuf->last_retired_head;
 	ringbuf->last_retired_head = -1;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 686ebf5..4495359 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -5,6 +5,10 @@ 
 void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
 int intel_logical_rings_init(struct drm_device *dev);
 
+int intel_logical_ring_add_request(struct intel_engine_cs *ring,
+				   struct drm_file *file,
+				   struct drm_i915_gem_object *obj,
+				   u32 *out_seqno);
 void intel_logical_ring_advance_and_submit(struct intel_engine_cs *ring,
 					   struct intel_context *ctx);
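
From the caller's point of view nothing changes; a sketch of the
call site (assuming ring, file and obj are in scope, as in
__i915_add_request's existing callers):

	u32 seqno;
	int ret;

	/* With execlists enabled, this now forwards to
	 * intel_logical_ring_add_request(), which resolves the
	 * per-context ringbuffer through request->ctx instead of
	 * using ring->buffer.
	 */
	ret = __i915_add_request(ring, file, obj, &seqno);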