Message ID | 1399637360-4277-41-git-send-email-oscar.mateo@intel.com (mailing list archive)
---|---
State | New, archived
On Fri, May 09, 2014 at 01:09:10PM +0100, oscar.mateo@intel.com wrote:
> From: Thomas Daniel <thomas.daniel@intel.com>
>
> Handle all context status events in the context status buffer on every
> context switch interrupt. We only remove work from the execlist queue
> after a context status buffer reports that it has completed and we only
> attempt to schedule new contexts on interrupt when a previously submitted
> context completes (unless no contexts are queued, which means the GPU is
> free).
>
> Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>
>
> v2: Unreferencing the context when we are freeing the request might free
> the backing bo, which requires the struct_mutex to be grabbed, so defer
> unreferencing and freeing to a bottom half.
>
> v3:
> - Ack the interrupt immediately, before trying to handle it (fix for
>   missing interrupts by Bob Beckett <robert.beckett@intel.com>).

This interrupt handling change is interesting since it might explain our
irq handling woes on gen5+ with the two-level GT interrupt handling
scheme. Can you please roll this out as a prep patch for all the existing
gt interrupt sources we handle already for gen5+?

Thanks, Daniel

> - Update the Context Status Buffer Read Pointer, just in case (spotted
>   by Damien Lespiau).
>
> Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_drv.h         |    3 +
>  drivers/gpu/drm/i915/i915_irq.c         |   38 +++++++-----
>  drivers/gpu/drm/i915/intel_lrc.c        |  102 +++++++++++++++++++++++++++++-
>  drivers/gpu/drm/i915/intel_ringbuffer.c |    1 +
>  drivers/gpu/drm/i915/intel_ringbuffer.h |    1 +
>  5 files changed, 129 insertions(+), 16 deletions(-)
>
> [snip: the quoted diff is identical to the patch reproduced at the end of this page]
> -----Original Message-----
> From: Daniel Vetter [mailto:daniel.vetter@ffwll.ch] On Behalf Of Daniel Vetter
> Sent: Wednesday, June 11, 2014 12:52 PM
> To: Mateo Lozano, Oscar
> Cc: intel-gfx@lists.freedesktop.org; Daniel, Thomas
> Subject: Re: [Intel-gfx] [PATCH 40/50] drm/i915/bdw: Handle context switch events
>
> On Fri, May 09, 2014 at 01:09:10PM +0100, oscar.mateo@intel.com wrote:
> > From: Thomas Daniel <thomas.daniel@intel.com>
> >
> > Handle all context status events in the context status buffer on every
> > context switch interrupt. We only remove work from the execlist queue
> > after a context status buffer reports that it has completed and we
> > only attempt to schedule new contexts on interrupt when a previously
> > submitted context completes (unless no contexts are queued, which
> > means the GPU is free).
> >
> > Signed-off-by: Thomas Daniel <thomas.daniel@intel.com>
> >
> > v2: Unreferencing the context when we are freeing the request might
> > free the backing bo, which requires the struct_mutex to be grabbed, so
> > defer unreferencing and freeing to a bottom half.
> >
> > v3:
> > - Ack the interrupt immediately, before trying to handle it (fix for
> >   missing interrupts by Bob Beckett <robert.beckett@intel.com>).
>
> This interrupt handling change is interesting since it might explain our
> irq handling woes on gen5+ with the two-level GT interrupt handling
> scheme. Can you please roll this out as a prep patch for all the existing
> gt interrupt sources we handle already for gen5+?
>
> Thanks, Daniel

Can do.
> > > - Ack the interrupt immediately, before trying to handle it (fix for
> > >   missing interrupts by Bob Beckett <robert.beckett@intel.com>).
> >
> > This interrupt handling change is interesting since it might explain
> > our irq handling woes on gen5+ with the two-level GT interrupt
> > handling scheme. Can you please roll this out as a prep patch for all
> > the existing gt interrupt sources we handle already for gen5+?
> >
> > Thanks, Daniel
>
> Can do.

One question, though: why only the GT interrupts? What about DE, PM, etc.?

It looks like the BSpec is pretty clear on this:

1 - Disable Master Interrupt Control
2 - Find the category of interrupt that is pending
3 - Find the source(s) of the interrupt and ***clear the Interrupt Identity bits (IIR)***
4 - Process the interrupt(s) that had bits set in the IIRs
5 - Re-enable Master Interrupt Control
On Wed, Jun 11, 2014 at 03:23:33PM +0000, Mateo Lozano, Oscar wrote:
> > > > - Ack the interrupt immediately, before trying to handle it (fix for
> > > >   missing interrupts by Bob Beckett <robert.beckett@intel.com>).
> > >
> > > This interrupt handling change is interesting since it might explain
> > > our irq handling woes on gen5+ with the two-level GT interrupt
> > > handling scheme. Can you please roll this out as a prep patch for all
> > > the existing gt interrupt sources we handle already for gen5+?
> > >
> > > Thanks, Daniel
> >
> > Can do.
>
> One question, though: why only the GT interrupts? What about DE, PM, etc.?
>
> It looks like the BSpec is pretty clear on this:
>
> 1 - Disable Master Interrupt Control
> 2 - Find the category of interrupt that is pending
> 3 - Find the source(s) of the interrupt and ***clear the Interrupt Identity bits (IIR)***
> 4 - Process the interrupt(s) that had bits set in the IIRs
> 5 - Re-enable Master Interrupt Control

Yeah, makes sense to do it for all. When I looked at it, the funky part
was the SDE interrupts, where we (at least on pre-bdw) have this crazy
hack. I guess at least that one we should leave in, since apparently it
works.
-Daniel
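An aside on why ack-first matters here: the GT IIR bits are write-1-to-clear, so if the handler clears them only after processing (as the old code did), an event that arrives during processing is wiped without ever being seen. Clearing the latched bits up front lets a mid-handler event re-assert IIR and re-raise the interrupt. A minimal userspace sketch of the pattern; `fake_iir`, `iir_read` and `gt_irq_handler` are illustrative names standing in for the real register accessors, not driver code:

```c
#include <stdint.h>
#include <stdio.h>

/* Toy write-1-to-clear register standing in for GEN8_GT_IIR(n). */
static uint32_t fake_iir = 0x3;

static uint32_t iir_read(void)        { return fake_iir; }
static void     iir_write(uint32_t v) { fake_iir &= ~v; }

static void gt_irq_handler(void)
{
	uint32_t iir = iir_read();

	if (!iir)
		return;

	/*
	 * Ack first: clear the identity bits before acting on the
	 * latched copy. An event that fires while we are still in the
	 * handler sets its (now clear) bit again and re-raises the
	 * interrupt, so it cannot be lost. Clearing at the end, as the
	 * old code did, wipes bits set mid-handler that were never
	 * processed.
	 */
	iir_write(iir);

	if (iir & (1u << 0))
		printf("user interrupt\n");
	if (iir & (1u << 1))
		printf("context switch event\n");
}

int main(void)
{
	gt_irq_handler();
	return 0;
}
```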
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f2aae6a..07b8bdc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1748,6 +1748,8 @@ struct drm_i915_gem_request {
 
 	/** execlist queue entry for this request */
 	struct list_head execlist_link;
+	/** Struct to handle this request in the bottom half of an interrupt */
+	struct work_struct work;
 };
 
 struct drm_i915_file_private {
@@ -2449,6 +2451,7 @@ static inline u32 intel_get_lr_contextid(struct drm_i915_gem_object *ctx_obj)
 int gen8_switch_context_queue(struct intel_engine *ring,
 			      struct i915_hw_context *to,
 			      u32 tail);
+void gen8_handle_context_events(struct intel_engine *ring);
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a28cf6c..fbffead 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1300,6 +1300,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 				       struct drm_i915_private *dev_priv,
 				       u32 master_ctl)
 {
+	struct intel_engine *ring;
 	u32 rcs, bcs, vcs, vecs;
 	uint32_t tmp = 0;
 	irqreturn_t ret = IRQ_NONE;
@@ -1307,16 +1308,22 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
 		tmp = I915_READ(GEN8_GT_IIR(0));
 		if (tmp) {
+			I915_WRITE(GEN8_GT_IIR(0), tmp);
 			ret = IRQ_HANDLED;
+
 			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
-			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			ring = &dev_priv->ring[RCS];
 			if (rcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[RCS]);
+				notify_ring(dev, ring);
+			if (rcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
+				gen8_handle_context_events(ring);
+
+			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+			ring = &dev_priv->ring[BCS];
 			if (bcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[BCS]);
-			if ((rcs | bcs) & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
-			I915_WRITE(GEN8_GT_IIR(0), tmp);
+				notify_ring(dev, ring);
+			if (bcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
+				gen8_handle_context_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
 	}
@@ -1324,18 +1331,20 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
 		tmp = I915_READ(GEN8_GT_IIR(1));
 		if (tmp) {
+			I915_WRITE(GEN8_GT_IIR(1), tmp);
 			ret = IRQ_HANDLED;
 			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+			ring = &dev_priv->ring[VCS];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VCS]);
+				notify_ring(dev, ring);
 			if (vcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
+				gen8_handle_context_events(ring);
 			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+			ring = &dev_priv->ring[VCS2];
 			if (vcs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VCS2]);
+				notify_ring(dev, ring);
 			if (vcs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
-			I915_WRITE(GEN8_GT_IIR(1), tmp);
+				gen8_handle_context_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
 	}
@@ -1343,13 +1352,14 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
 	if (master_ctl & GEN8_GT_VECS_IRQ) {
 		tmp = I915_READ(GEN8_GT_IIR(3));
 		if (tmp) {
+			I915_WRITE(GEN8_GT_IIR(3), tmp);
 			ret = IRQ_HANDLED;
 			vecs = tmp >> GEN8_VECS_IRQ_SHIFT;
+			ring = &dev_priv->ring[VECS];
 			if (vecs & GT_RENDER_USER_INTERRUPT)
-				notify_ring(dev, &dev_priv->ring[VECS]);
+				notify_ring(dev, ring);
 			if (vecs & GEN8_GT_CONTEXT_SWITCH_INTERRUPT)
-				DRM_DEBUG_DRIVER("TODO: Context switch\n");
-			I915_WRITE(GEN8_GT_IIR(3), tmp);
+				gen8_handle_context_events(ring);
 		} else
 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6da7db9..1ff493a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -49,6 +49,22 @@
 #define RING_ELSP(ring)			((ring)->mmio_base+0x230)
 #define RING_EXECLIST_STATUS(ring)	((ring)->mmio_base+0x234)
 #define RING_CONTEXT_CONTROL(ring)	((ring)->mmio_base+0x244)
+#define RING_CONTEXT_STATUS_BUF(ring)	((ring)->mmio_base+0x370)
+#define RING_CONTEXT_STATUS_PTR(ring)	((ring)->mmio_base+0x3a0)
+
+#define RING_EXECLIST_QFULL		(1 << 0x2)
+#define RING_EXECLIST1_VALID		(1 << 0x3)
+#define RING_EXECLIST0_VALID		(1 << 0x4)
+#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
+#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
+#define RING_EXECLIST0_ACTIVE		(1 << 0x12)
+
+#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
+#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
+#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
+#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
+#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
+#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)
 
 #define CTX_LRI_HEADER_0		0x01
 #define CTX_CONTEXT_CONTROL		0x02
@@ -203,6 +219,9 @@ static void gen8_switch_context_unqueue(struct intel_engine *ring)
 {
 	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
 	struct drm_i915_gem_request *cursor = NULL, *tmp = NULL;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+	assert_spin_locked(&ring->execlist_lock);
 
 	if (list_empty(&ring->execlist_queue))
 		return;
@@ -215,8 +234,7 @@ static void gen8_switch_context_unqueue(struct intel_engine *ring)
 			/* Same ctx: ignore first request, as second request
 			 * will update tail past first request's workload */
 			list_del(&req0->execlist_link);
-			i915_gem_context_unreference(req0->ctx);
-			kfree(req0);
+			queue_work(dev_priv->wq, &req0->work);
 			req0 = cursor;
 		} else {
 			req1 = cursor;
@@ -228,6 +246,85 @@ static void gen8_switch_context_unqueue(struct intel_engine *ring)
 			req1? req1->ctx : NULL, req1? req1->tail : 0));
 }
 
+static bool check_remove_request(struct intel_engine *ring, u32 request_id)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_request *head_req;
+
+	assert_spin_locked(&ring->execlist_lock);
+
+	head_req = list_first_entry_or_null(&ring->execlist_queue,
+			struct drm_i915_gem_request, execlist_link);
+	if (head_req != NULL) {
+		struct drm_i915_gem_object *ctx_obj =
+				head_req->ctx->engine[ring->id].obj;
+		if (intel_get_lr_contextid(ctx_obj) == request_id) {
+			list_del(&head_req->execlist_link);
+			queue_work(dev_priv->wq, &head_req->work);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+void gen8_handle_context_events(struct intel_engine *ring)
+{
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	u32 status_pointer;
+	u8 read_pointer;
+	u8 write_pointer;
+	u32 status;
+	u32 status_id;
+	u32 submit_contexts = 0;
+
+	status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
+
+	read_pointer = ring->next_context_status_buffer;
+	write_pointer = status_pointer & 0x07;
+	if (read_pointer > write_pointer)
+		write_pointer += 6;
+
+	spin_lock(&ring->execlist_lock);
+
+	while (read_pointer < write_pointer) {
+		read_pointer++;
+		status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+				(read_pointer % 6) * 8);
+		status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
+				(read_pointer % 6) * 8 + 4);
+
+		if (status & GEN8_CTX_STATUS_COMPLETE) {
+			if (check_remove_request(ring, status_id))
+				submit_contexts++;
+		}
+	}
+
+	if (submit_contexts != 0)
+		gen8_switch_context_unqueue(ring);
+
+	spin_unlock(&ring->execlist_lock);
+
+	WARN(submit_contexts > 2, "More than two context complete events?\n");
+	ring->next_context_status_buffer = write_pointer % 6;
+
+	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
+			((u32)ring->next_context_status_buffer & 0x07) << 8);
+}
+
+static void free_request_task(struct work_struct *work)
+{
+	struct drm_i915_gem_request *req =
+			container_of(work, struct drm_i915_gem_request, work);
+	struct drm_device *dev = req->ring->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_context_unreference(req->ctx);
+	mutex_unlock(&dev->struct_mutex);
+
+	kfree(req);
+}
+
 int gen8_switch_context_queue(struct intel_engine *ring,
 			      struct i915_hw_context *to,
 			      u32 tail)
@@ -243,6 +340,7 @@ int gen8_switch_context_queue(struct intel_engine *ring,
 	req->ctx = to;
 	i915_gem_context_reference(req->ctx);
 	req->tail = tail;
+	INIT_WORK(&req->work, free_request_task);
 
 	spin_lock_irqsave(&ring->execlist_lock, flags);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 35ced7c..9cd6ee8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1573,6 +1573,7 @@ static int intel_init_ring(struct drm_device *dev,
 		if (ring->status_page.page_addr == NULL)
 			return -ENOMEM;
 		ring->status_page.obj = obj;
+		ring->next_context_status_buffer = 0;
 	} else if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index daf91de..f3ae547 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -178,6 +178,7 @@ struct intel_engine {
 
 	spinlock_t execlist_lock;
 	struct list_head execlist_queue;
+	u8 next_context_status_buffer;
 
 	struct i915_hw_context *default_context;
 	struct i915_hw_context *last_context;
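
One detail worth unpacking from gen8_handle_context_events above: the context status buffer is a six-entry ring of which the hardware only reports the write pointer, so the driver keeps its own read pointer and, when the write pointer has wrapped past it, adds 6 before walking, letting "% 6" fold each index back into range so every new slot is visited exactly once. A standalone sketch of just that walk, under the assumption of a toy csb_entry() accessor standing in for the paired MMIO reads per slot:

```c
#include <stdio.h>

#define CSB_ENTRIES 6

/* Stand-in for the pair of MMIO reads per status-buffer slot. */
static unsigned int csb_entry(unsigned int slot)
{
	return slot; /* dummy payload */
}

/*
 * read  = software read pointer (last slot already consumed)
 * write = raw hardware write pointer, 0..5 (last slot written)
 * Returns how many new entries were visited.
 */
static unsigned int walk_csb(unsigned int read, unsigned int write)
{
	unsigned int seen = 0;

	/* Unwrap: if the hardware lapped us, walk past the end and
	 * let "% CSB_ENTRIES" fold each index back into range. */
	if (read > write)
		write += CSB_ENTRIES;

	while (read < write) {
		read++;
		printf("slot %u -> %u\n", read % CSB_ENTRIES,
		       csb_entry(read % CSB_ENTRIES));
		seen++;
	}

	return seen;
}

int main(void)
{
	/* read=4, hw write=1: slots 5, 0 and 1 are new. */
	walk_csb(4, 1);
	return 0;
}
```

The same unwrap-then-modulo trick is what lets the driver store next_context_status_buffer as a plain 0..5 value (write_pointer % 6) at the end of the handler.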