@@ -1455,6 +1455,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 master_ctl)
{
+ struct intel_engine_cs *ring;
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
@@ -1462,16 +1463,22 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(0));
if (tmp) {
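+ /* Ack the IIR bits before servicing them, so an event arriving meanwhile re-raises the interrupt rather than being cleared unseen. */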
+ I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
+
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
- bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ ring = &dev_priv->ring[RCS];
if (rcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(dev, ring);
+ if (rcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
+
+ bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
- if ((rcs | bcs) & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
- DRM_DEBUG_DRIVER("TODO: Context switch\n");
- I915_WRITE(GEN8_GT_IIR(0), tmp);
+ notify_ring(dev, ring);
+ if (bcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
@@ -1479,18 +1486,22 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
tmp = I915_READ(GEN8_GT_IIR(1));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
+
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+ ring = &dev_priv->ring[VCS];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
+ notify_ring(dev, ring);
if (vcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
- DRM_DEBUG_DRIVER("TODO: Context switch\n");
+ intel_execlists_handle_ctx_events(ring);
+
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+ ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS2]);
+ notify_ring(dev, ring);
if (vcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
- DRM_DEBUG_DRIVER("TODO: Context switch\n");
- I915_WRITE(GEN8_GT_IIR(1), tmp);
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
@@ -1509,13 +1520,15 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
if (master_ctl & GEN8_GT_VECS_IRQ) {
tmp = I915_READ(GEN8_GT_IIR(3));
if (tmp) {
+ I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
+
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+ ring = &dev_priv->ring[VECS];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VECS]);
+ notify_ring(dev, ring);
if (vcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
- DRM_DEBUG_DRIVER("TODO: Context switch\n");
- I915_WRITE(GEN8_GT_IIR(3), tmp);
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
@@ -49,6 +49,22 @@
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
+#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
+#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
+
+#define RING_EXECLIST_QFULL (1 << 0x2)
+#define RING_EXECLIST1_VALID (1 << 0x3)
+#define RING_EXECLIST0_VALID (1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
+#define RING_EXECLIST1_ACTIVE (1 << 0x11)
+#define RING_EXECLIST0_ACTIVE (1 << 0x12)
+
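+/* Status bits reported in each Context Status Buffer (CSB) entry */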
+#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE (1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)

#define CTX_LRI_HEADER_0 0x01
#define CTX_CONTEXT_CONTROL 0x02
@@ -218,6 +234,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+ assert_spin_locked(&ring->execlist_lock);

if (list_empty(&ring->execlist_queue))
return;
@@ -230,8 +249,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
/* Same ctx: ignore first request, as second request
* will update tail past first request's workload */
list_del(&req0->execlist_link);
- i915_gem_context_unreference(req0->ctx);
- kfree(req0);
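+ /* Drop this request via a worker: the final context unreference needs struct_mutex, which cannot be taken under the irq-time execlist_lock. */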
+ queue_work(dev_priv->wq, &req0->work);
req0 = cursor;
} else {
req1 = cursor;
@@ -243,6 +261,86 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
req1? req1->ctx : NULL, req1? req1->tail : 0));
}
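
+/*
+ * Check whether the request at the head of this engine's execlist queue
+ * matches the context ID reported in the completed CSB entry; if so, remove
+ * it from the queue and hand it to the free worker.
+ */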
+static bool execlists_check_remove_request(struct intel_engine_cs *ring,
+ u32 request_id)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct intel_ctx_submit_request *head_req;
+
+ assert_spin_locked(&ring->execlist_lock);
+
+ head_req = list_first_entry_or_null(&ring->execlist_queue,
+ struct intel_ctx_submit_request, execlist_link);
+ if (head_req != NULL) {
+ struct drm_i915_gem_object *ctx_obj =
+ head_req->ctx->engine[ring->id].obj;
+ if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+ list_del(&head_req->execlist_link);
+ queue_work(dev_priv->wq, &head_req->work);
+ return true;
+ }
+ }
+
+ return false;
+}
+
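+/*
+ * intel_execlists_handle_ctx_events() - handle Context Switch interrupts
+ * @ring: Engine Command Streamer to handle.
+ *
+ * Called from the interrupt handler: read the unread Context Status Buffer
+ * entries, remove completed requests from the execlist queue and submit any
+ * contexts still queued up.
+ */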
+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ u32 status_pointer;
+ u8 read_pointer;
+ u8 write_pointer;
+ u32 status;
+ u32 status_id;
+ u32 submit_contexts = 0;
+
+ status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+
+ read_pointer = ring->next_context_status_buffer;
+ write_pointer = status_pointer & 0x07;
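+ /* The CSB holds 6 entries; if the hardware write pointer has wrapped behind our read pointer, unwrap it so the loop below can walk forward. */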
+ if (read_pointer > write_pointer)
+ write_pointer += 6;
+
+ spin_lock(&ring->execlist_lock);
+
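+ /* Each CSB entry is two dwords: a status word followed by the context ID it refers to. */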
+ while (read_pointer < write_pointer) {
+ read_pointer++;
+ status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+ (read_pointer % 6) * 8);
+ status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+ (read_pointer % 6) * 8 + 4);
+
+ if (status & GEN8_CTX_STATUS_COMPLETE) {
+ if (execlists_check_remove_request(ring, status_id))
+ submit_contexts++;
+ }
+ }
+
+ if (submit_contexts != 0)
+ execlists_context_unqueue(ring);
+
+ spin_unlock(&ring->execlist_lock);
+
+ WARN(submit_contexts > 2, "More than two context complete events?\n");
+ ring->next_context_status_buffer = write_pointer % 6;
+
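+ /* Report the updated read pointer back to the hardware (bits 10:8 of the status pointer register). */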
+ I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
+ ((u32)ring->next_context_status_buffer & 0x07) << 8);
+}
+
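+/*
+ * Deferred free for a submit request: runs in process context because the
+ * context is unreferenced under struct_mutex, which the interrupt handler
+ * cannot take.
+ */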
+static void execlists_free_request_task(struct work_struct *work)
+{
+ struct intel_ctx_submit_request *req =
+ container_of(work, struct intel_ctx_submit_request, work);
+ struct drm_device *dev = req->ring->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_context_unreference(req->ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(req);
+}
+
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
@@ -258,6 +356,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
i915_gem_context_reference(req->ctx);
req->ring = ring;
req->tail = tail;
+ INIT_WORK(&req->work, execlists_free_request_task);
spin_lock_irqsave(&ring->execlist_lock, flags);
@@ -1046,6 +1145,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
INIT_LIST_HEAD(&ring->execlist_queue);
spin_lock_init(&ring->execlist_lock);
+ ring->next_context_status_buffer = 0;

ret = intel_lr_context_deferred_create(dctx, ring);
if (ret)
@@ -50,6 +50,9 @@ struct intel_ctx_submit_request {
u32 tail;

struct list_head execlist_link;
+ struct work_struct work; /* deferred free of this request (see execlists_free_request_task) */
};

+void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
+
#endif /* _INTEL_LRC_H_ */
@@ -158,6 +158,7 @@ struct intel_engine_cs {
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
+ u8 next_context_status_buffer; /* index of the next CSB entry to read */
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
void (*submit_ctx)(struct intel_engine_cs *ring,
struct intel_context *ctx, u32 value);