Message ID | 1412604925-11290-22-git-send-email-John.C.Harrison@Intel.com (mailing list archive)
---|---
State | New, archived
On Mon, Oct 06, 2014 at 03:15:25PM +0100, John.C.Harrison@Intel.com wrote: > From: John Harrison <John.C.Harrison@Intel.com> > > For: VIZ-4377 > Signed-off-by: John.C.Harrison@Intel.com I think this should be split up into the different parts: - s/obj->ring/obj->last_read_req->ring/ for all the cases that just want the current ring. - s/obj->ring/obj->last_read_req/ I think in a bunch of places the code would actually be more readable if we'd check for obj->active instead. - All the oddball special cases probably deserve their own commit + nice explanation in the commit message about why the change is correct. Cheers, Daniel > --- > drivers/gpu/drm/i915/i915_debugfs.c | 9 +++++---- > drivers/gpu/drm/i915/i915_drv.h | 2 -- > drivers/gpu/drm/i915/i915_gem.c | 32 +++++++++++++++++++------------ > drivers/gpu/drm/i915/i915_gem_context.c | 12 +++++++++++- > drivers/gpu/drm/i915/i915_gpu_error.c | 3 ++- > drivers/gpu/drm/i915/intel_display.c | 15 ++++++++------- > 6 files changed, 46 insertions(+), 27 deletions(-) > > diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c > index df53515..b1d989f 100644 > --- a/drivers/gpu/drm/i915/i915_debugfs.c > +++ b/drivers/gpu/drm/i915/i915_debugfs.c > @@ -168,8 +168,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) > *t = '\0'; > seq_printf(m, " (%s mappable)", s); > } > - if (obj->ring != NULL) > - seq_printf(m, " (%s)", obj->ring->name); > + if (obj->last_read_req != NULL) > + seq_printf(m, " (%s)", > + i915_gem_request_get_ring(obj->last_read_req)->name); > if (obj->frontbuffer_bits) > seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); > } > @@ -336,7 +337,7 @@ static int per_file_stats(int id, void *ptr, void *data) > if (ppgtt->file_priv != stats->file_priv) > continue; > > - if (obj->ring) /* XXX per-vma statistic */ > + if (obj->last_read_req) /* XXX per-vma statistic */ > stats->active += obj->base.size; > else > stats->inactive += obj->base.size; > @@ -346,7 +347,7 @@ static int per_file_stats(int id, void *ptr, void *data) > } else { > if (i915_gem_obj_ggtt_bound(obj)) { > stats->global += obj->base.size; > - if (obj->ring) > + if (obj->last_read_req) > stats->active += obj->base.size; > else > stats->inactive += obj->base.size; > diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h > index 0790593..cdbbdeb 100644 > --- a/drivers/gpu/drm/i915/i915_drv.h > +++ b/drivers/gpu/drm/i915/i915_drv.h > @@ -1859,8 +1859,6 @@ struct drm_i915_gem_object { > void *dma_buf_vmapping; > int vmapping_count; > > - struct intel_engine_cs *ring; > - > /** Breadcrumb of last rendering to the buffer. 
*/ > struct drm_i915_gem_request *last_read_req; > struct drm_i915_gem_request *last_write_req; > diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c > index d40dad7..561fb81 100644 > --- a/drivers/gpu/drm/i915/i915_gem.c > +++ b/drivers/gpu/drm/i915/i915_gem.c > @@ -2184,14 +2184,18 @@ static void > i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, > struct intel_engine_cs *ring) > { > - struct drm_i915_gem_request *req = intel_ring_get_request(ring); > + struct drm_i915_gem_request *req; > + struct intel_engine_cs *old_ring; > > BUG_ON(ring == NULL); > - if (obj->ring != ring && obj->last_write_req) { > + > + req = intel_ring_get_request(ring); > + old_ring = i915_gem_request_get_ring(obj->last_read_req); > + > + if (old_ring != ring && obj->last_write_req) { > /* Keep the request relative to the current ring */ > obj->last_write_req = req; > } > - obj->ring = ring; > > /* Add a reference if we're newly entering the active list. */ > if (!obj->active) { > @@ -2230,7 +2234,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) > intel_fb_obj_flush(obj, true); > > list_del_init(&obj->ring_list); > - obj->ring = NULL; > > obj->last_read_req = NULL; > obj->last_write_req = NULL; > @@ -2247,9 +2250,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) > static void > i915_gem_object_retire(struct drm_i915_gem_object *obj) > { > - struct intel_engine_cs *ring = obj->ring; > - > - if (ring == NULL) > + if (obj->last_read_req == NULL) > return; > > if (i915_gem_request_completed(obj->last_read_req, true)) > @@ -2769,14 +2770,17 @@ i915_gem_idle_work_handler(struct work_struct *work) > static int > i915_gem_object_flush_active(struct drm_i915_gem_object *obj) > { > + struct intel_engine_cs *ring; > int ret; > > if (obj->active) { > + ring = i915_gem_request_get_ring(obj->last_read_req); > + > ret = i915_gem_check_olr(obj->last_read_req); > if (ret) > return ret; > > - i915_gem_retire_requests_ring(obj->ring); > + i915_gem_retire_requests_ring(ring); > } > > return 0; > @@ -2876,9 +2880,11 @@ int > i915_gem_object_sync(struct drm_i915_gem_object *obj, > struct intel_engine_cs *to) > { > - struct intel_engine_cs *from = obj->ring; > + struct intel_engine_cs *from; > int ret, idx; > > + from = i915_gem_request_get_ring(obj->last_read_req); > + > if (from == NULL || to == from) > return 0; > > @@ -3889,7 +3895,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, > bool was_pin_display; > int ret; > > - if (pipelined != obj->ring) { > + if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) { > ret = i915_gem_object_sync(obj, pipelined); > if (ret) > return ret; > @@ -4303,9 +4309,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, > ret = i915_gem_object_flush_active(obj); > > args->busy = obj->active; > - if (obj->ring) { > + if (obj->last_read_req) { > + struct intel_engine_cs *ring; > BUILD_BUG_ON(I915_NUM_RINGS > 16); > - args->busy |= intel_ring_flag(obj->ring) << 16; > + ring = i915_gem_request_get_ring(obj->last_read_req); > + args->busy |= intel_ring_flag(ring) << 16; > } > > drm_gem_object_unreference(&obj->base); > diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c > index a5221d8..8f24831 100644 > --- a/drivers/gpu/drm/i915/i915_gem_context.c > +++ b/drivers/gpu/drm/i915/i915_gem_context.c > @@ -613,7 +613,17 @@ static int do_switch(struct intel_engine_cs *ring, > * swapped, but there is no way to do that yet. 
> */ > from->legacy_hw_ctx.rcs_state->dirty = 1; > - BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring); > + > + /* BUG_ON(i915_gem_request_get_ring( > + from->legacy_hw_ctx.rcs_state->last_read_req) != ring); */ > + /* NB: last_read_req has already been updated to the current > + * request however, that request has not yet been submitted. > + * Thus last_read_req->ring is guaranteed to be null! > + * NB2: Doing the check before the update of last_read_req > + * (which happens in i915_vma_move_to_active() just above), > + * also fails because last_read_req is almost always null on > + * entry! > + */ > > /* obj is kept alive until the next request by its active ref */ > i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); > diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c > index 9545d96..b9ecbd9 100644 > --- a/drivers/gpu/drm/i915/i915_gpu_error.c > +++ b/drivers/gpu/drm/i915/i915_gpu_error.c > @@ -681,7 +681,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, > err->dirty = obj->dirty; > err->purgeable = obj->madv != I915_MADV_WILLNEED; > err->userptr = obj->userptr.mm != NULL; > - err->ring = obj->ring ? obj->ring->id : -1; > + err->ring = obj->last_read_req ? > + i915_gem_request_get_ring(obj->last_read_req)->id : -1; > err->cache_level = obj->cache_level; > } > > diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c > index 9ca8f94..8238aac 100644 > --- a/drivers/gpu/drm/i915/intel_display.c > +++ b/drivers/gpu/drm/i915/intel_display.c > @@ -9728,7 +9728,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, > else if (i915.enable_execlists) > return true; > else > - return ring != obj->ring; > + return ring != i915_gem_request_get_ring(obj->last_read_req); > } > > static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) > @@ -9769,8 +9769,6 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj) > if (!obj->last_write_req) > return 0; > > - ring = obj->ring; > - > if (i915_gem_request_completed(obj->last_write_req, true)) > return 0; > > @@ -9778,6 +9776,7 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj) > if (ret) > return ret; > > + ring = i915_gem_request_get_ring(obj->last_write_req); > if (WARN_ON(!ring->irq_get(ring))) > return 0; > > @@ -9837,14 +9836,15 @@ static int intel_queue_mmio_flip(struct drm_device *dev, > spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); > intel_crtc->mmio_flip.req = obj->last_write_req; > i915_gem_request_reference(intel_crtc->mmio_flip.req); > - intel_crtc->mmio_flip.ring_id = obj->ring->id; > + BUG_ON(ring != i915_gem_request_get_ring(intel_crtc->mmio_flip.req)); > + intel_crtc->mmio_flip.ring_id = ring->id; > spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); > > /* > * Double check to catch cases where irq fired before > * mmio flip data was ready > */ > - intel_notify_mmio_flip(obj->ring); > + intel_notify_mmio_flip(ring); > return 0; > } > > @@ -10022,7 +10022,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, > } else if (IS_IVYBRIDGE(dev)) { > ring = &dev_priv->ring[BCS]; > } else if (INTEL_INFO(dev)->gen >= 7) { > - ring = obj->ring; > + ring = i915_gem_request_get_ring(obj->last_read_req); > if (ring == NULL || ring->id != RCS) > ring = &dev_priv->ring[BCS]; > } else { > @@ -10043,7 +10043,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, > goto cleanup_unpin; > > work->flip_queued_req = obj->last_write_req; > - work->flip_queued_ring = obj->ring; > + 
work->flip_queued_ring = > + i915_gem_request_get_ring(obj->last_write_req); > } else { > ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, > page_flip_flags); > -- > 1.7.9.5 > > _______________________________________________ > Intel-gfx mailing list > Intel-gfx@lists.freedesktop.org > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
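The conversion in this patch leans on i915_gem_request_get_ring() being safe to call with a NULL request: several converted call sites (i915_gem_object_move_to_active(), i915_gem_object_pin_to_display_plane(), use_mmio_flip()) pass obj->last_read_req without checking it first. The standalone mock below is a sketch of that expected behaviour only — the struct layouts are simplified stand-ins and the helper body is an assumption, not the real i915 definitions.

```c
/*
 * Standalone mock, not kernel code: simplified stand-in structs and an
 * assumed body for i915_gem_request_get_ring().  It only demonstrates the
 * NULL-tolerant behaviour the converted call sites rely on.
 */
#include <stdio.h>

struct intel_engine_cs {
	const char *name;
};

struct drm_i915_gem_request {
	struct intel_engine_cs *ring;	/* ring the request was issued on */
};

struct drm_i915_gem_object {
	struct drm_i915_gem_request *last_read_req;	/* replaces obj->ring */
};

/* Assumed helper shape: a NULL request simply means "no ring". */
static struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}

int main(void)
{
	struct intel_engine_cs rcs = { .name = "render ring" };
	struct drm_i915_gem_request req = { .ring = &rcs };
	struct drm_i915_gem_object idle = { .last_read_req = NULL };
	struct drm_i915_gem_object busy = { .last_read_req = &req };
	struct intel_engine_cs *ring;

	/* Old code tested obj->ring; the converted code asks the request. */
	ring = i915_gem_request_get_ring(idle.last_read_req);
	printf("idle object: %s\n", ring ? ring->name : "(no ring)");

	ring = i915_gem_request_get_ring(busy.last_read_req);
	printf("busy object: %s\n", ring ? ring->name : "(no ring)");

	return 0;
}
```

If the helper were not NULL-tolerant, the idle-object paths at those call sites would dereference a NULL request, so any split-up respin of the series presumably needs to keep that guarantee.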
On 19/10/2014 15:12, Daniel Vetter wrote: > On Mon, Oct 06, 2014 at 03:15:25PM +0100, John.C.Harrison@Intel.com wrote: >> From: John Harrison <John.C.Harrison@Intel.com> >> >> For: VIZ-4377 >> Signed-off-by: John.C.Harrison@Intel.com > I think this should be split up into the different parts: > > - s/obj->ring/obj->last_read_req->ring/ for all the cases that just want > the current ring. > - s/obj->ring/obj->last_read_req/ I think in a bunch of places the code > would actually be more readable if we'd check for obj->active instead. > - All the oddball special cases probably deserve their own commit + nice > explanation in the commit message about why the change is correct. Can you explain which is which? As for why the change is correct, it is not a functional change. obj->ring was assigned with obj->last_read_seqno and cleared with obj->last_read_seqno. Thus querying obj->ring is equivalent to querying obj->last_read_req->ring in all cases. Given that the ring is now available from obj->lrr, it seemed redundant to also have it explicitly saved in obj->ring, hence the patch to remove it. AFAICT, there are two examples in debugfs that should just be querying obj->active. The rest are pretty much wanting the currently in use ring and/or are about to use last_read_req anyway. It seems more sensible to say 'if(obj->lrr) { do_stuff(obj->lrr); }' than 'if(obj->active) { do_stuff(obj->lrr); }'. Nothing looks particularly 'oddball' to me! > Cheers, Daniel >> --- >> drivers/gpu/drm/i915/i915_debugfs.c | 9 +++++---- >> drivers/gpu/drm/i915/i915_drv.h | 2 -- >> drivers/gpu/drm/i915/i915_gem.c | 32 +++++++++++++++++++------------ >> drivers/gpu/drm/i915/i915_gem_context.c | 12 +++++++++++- >> drivers/gpu/drm/i915/i915_gpu_error.c | 3 ++- >> drivers/gpu/drm/i915/intel_display.c | 15 ++++++++------- >> 6 files changed, 46 insertions(+), 27 deletions(-) >> >> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c >> index df53515..b1d989f 100644 >> --- a/drivers/gpu/drm/i915/i915_debugfs.c >> +++ b/drivers/gpu/drm/i915/i915_debugfs.c >> @@ -168,8 +168,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) >> *t = '\0'; >> seq_printf(m, " (%s mappable)", s); >> } >> - if (obj->ring != NULL) >> - seq_printf(m, " (%s)", obj->ring->name); >> + if (obj->last_read_req != NULL) >> + seq_printf(m, " (%s)", >> + i915_gem_request_get_ring(obj->last_read_req)->name); >> if (obj->frontbuffer_bits) >> seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); >> } >> @@ -336,7 +337,7 @@ static int per_file_stats(int id, void *ptr, void *data) >> if (ppgtt->file_priv != stats->file_priv) >> continue; >> >> - if (obj->ring) /* XXX per-vma statistic */ >> + if (obj->last_read_req) /* XXX per-vma statistic */ >> stats->active += obj->base.size; >> else >> stats->inactive += obj->base.size; >> @@ -346,7 +347,7 @@ static int per_file_stats(int id, void *ptr, void *data) >> } else { >> if (i915_gem_obj_ggtt_bound(obj)) { >> stats->global += obj->base.size; >> - if (obj->ring) >> + if (obj->last_read_req) >> stats->active += obj->base.size; >> else >> stats->inactive += obj->base.size; >> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h >> index 0790593..cdbbdeb 100644 >> --- a/drivers/gpu/drm/i915/i915_drv.h >> +++ b/drivers/gpu/drm/i915/i915_drv.h >> @@ -1859,8 +1859,6 @@ struct drm_i915_gem_object { >> void *dma_buf_vmapping; >> int vmapping_count; >> >> - struct intel_engine_cs *ring; >> - >> /** Breadcrumb of last rendering to the buffer. 
*/ >> struct drm_i915_gem_request *last_read_req; >> struct drm_i915_gem_request *last_write_req; >> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c >> index d40dad7..561fb81 100644 >> --- a/drivers/gpu/drm/i915/i915_gem.c >> +++ b/drivers/gpu/drm/i915/i915_gem.c >> @@ -2184,14 +2184,18 @@ static void >> i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, >> struct intel_engine_cs *ring) >> { >> - struct drm_i915_gem_request *req = intel_ring_get_request(ring); >> + struct drm_i915_gem_request *req; >> + struct intel_engine_cs *old_ring; >> >> BUG_ON(ring == NULL); >> - if (obj->ring != ring && obj->last_write_req) { >> + >> + req = intel_ring_get_request(ring); >> + old_ring = i915_gem_request_get_ring(obj->last_read_req); >> + >> + if (old_ring != ring && obj->last_write_req) { >> /* Keep the request relative to the current ring */ >> obj->last_write_req = req; >> } >> - obj->ring = ring; >> >> /* Add a reference if we're newly entering the active list. */ >> if (!obj->active) { >> @@ -2230,7 +2234,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) >> intel_fb_obj_flush(obj, true); >> >> list_del_init(&obj->ring_list); >> - obj->ring = NULL; >> >> obj->last_read_req = NULL; >> obj->last_write_req = NULL; >> @@ -2247,9 +2250,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) >> static void >> i915_gem_object_retire(struct drm_i915_gem_object *obj) >> { >> - struct intel_engine_cs *ring = obj->ring; >> - >> - if (ring == NULL) >> + if (obj->last_read_req == NULL) >> return; >> >> if (i915_gem_request_completed(obj->last_read_req, true)) >> @@ -2769,14 +2770,17 @@ i915_gem_idle_work_handler(struct work_struct *work) >> static int >> i915_gem_object_flush_active(struct drm_i915_gem_object *obj) >> { >> + struct intel_engine_cs *ring; >> int ret; >> >> if (obj->active) { >> + ring = i915_gem_request_get_ring(obj->last_read_req); >> + >> ret = i915_gem_check_olr(obj->last_read_req); >> if (ret) >> return ret; >> >> - i915_gem_retire_requests_ring(obj->ring); >> + i915_gem_retire_requests_ring(ring); >> } >> >> return 0; >> @@ -2876,9 +2880,11 @@ int >> i915_gem_object_sync(struct drm_i915_gem_object *obj, >> struct intel_engine_cs *to) >> { >> - struct intel_engine_cs *from = obj->ring; >> + struct intel_engine_cs *from; >> int ret, idx; >> >> + from = i915_gem_request_get_ring(obj->last_read_req); >> + >> if (from == NULL || to == from) >> return 0; >> >> @@ -3889,7 +3895,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, >> bool was_pin_display; >> int ret; >> >> - if (pipelined != obj->ring) { >> + if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) { >> ret = i915_gem_object_sync(obj, pipelined); >> if (ret) >> return ret; >> @@ -4303,9 +4309,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, >> ret = i915_gem_object_flush_active(obj); >> >> args->busy = obj->active; >> - if (obj->ring) { >> + if (obj->last_read_req) { >> + struct intel_engine_cs *ring; >> BUILD_BUG_ON(I915_NUM_RINGS > 16); >> - args->busy |= intel_ring_flag(obj->ring) << 16; >> + ring = i915_gem_request_get_ring(obj->last_read_req); >> + args->busy |= intel_ring_flag(ring) << 16; >> } >> >> drm_gem_object_unreference(&obj->base); >> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c >> index a5221d8..8f24831 100644 >> --- a/drivers/gpu/drm/i915/i915_gem_context.c >> +++ b/drivers/gpu/drm/i915/i915_gem_context.c >> @@ -613,7 +613,17 @@ static int 
do_switch(struct intel_engine_cs *ring, >> * swapped, but there is no way to do that yet. >> */ >> from->legacy_hw_ctx.rcs_state->dirty = 1; >> - BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring); >> + >> + /* BUG_ON(i915_gem_request_get_ring( >> + from->legacy_hw_ctx.rcs_state->last_read_req) != ring); */ >> + /* NB: last_read_req has already been updated to the current >> + * request however, that request has not yet been submitted. >> + * Thus last_read_req->ring is guaranteed to be null! >> + * NB2: Doing the check before the update of last_read_req >> + * (which happens in i915_vma_move_to_active() just above), >> + * also fails because last_read_req is almost always null on >> + * entry! >> + */ >> >> /* obj is kept alive until the next request by its active ref */ >> i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); >> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c >> index 9545d96..b9ecbd9 100644 >> --- a/drivers/gpu/drm/i915/i915_gpu_error.c >> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c >> @@ -681,7 +681,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, >> err->dirty = obj->dirty; >> err->purgeable = obj->madv != I915_MADV_WILLNEED; >> err->userptr = obj->userptr.mm != NULL; >> - err->ring = obj->ring ? obj->ring->id : -1; >> + err->ring = obj->last_read_req ? >> + i915_gem_request_get_ring(obj->last_read_req)->id : -1; >> err->cache_level = obj->cache_level; >> } >> >> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c >> index 9ca8f94..8238aac 100644 >> --- a/drivers/gpu/drm/i915/intel_display.c >> +++ b/drivers/gpu/drm/i915/intel_display.c >> @@ -9728,7 +9728,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, >> else if (i915.enable_execlists) >> return true; >> else >> - return ring != obj->ring; >> + return ring != i915_gem_request_get_ring(obj->last_read_req); >> } >> >> static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) >> @@ -9769,8 +9769,6 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj) >> if (!obj->last_write_req) >> return 0; >> >> - ring = obj->ring; >> - >> if (i915_gem_request_completed(obj->last_write_req, true)) >> return 0; >> >> @@ -9778,6 +9776,7 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj) >> if (ret) >> return ret; >> >> + ring = i915_gem_request_get_ring(obj->last_write_req); >> if (WARN_ON(!ring->irq_get(ring))) >> return 0; >> >> @@ -9837,14 +9836,15 @@ static int intel_queue_mmio_flip(struct drm_device *dev, >> spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); >> intel_crtc->mmio_flip.req = obj->last_write_req; >> i915_gem_request_reference(intel_crtc->mmio_flip.req); >> - intel_crtc->mmio_flip.ring_id = obj->ring->id; >> + BUG_ON(ring != i915_gem_request_get_ring(intel_crtc->mmio_flip.req)); >> + intel_crtc->mmio_flip.ring_id = ring->id; >> spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); >> >> /* >> * Double check to catch cases where irq fired before >> * mmio flip data was ready >> */ >> - intel_notify_mmio_flip(obj->ring); >> + intel_notify_mmio_flip(ring); >> return 0; >> } >> >> @@ -10022,7 +10022,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, >> } else if (IS_IVYBRIDGE(dev)) { >> ring = &dev_priv->ring[BCS]; >> } else if (INTEL_INFO(dev)->gen >= 7) { >> - ring = obj->ring; >> + ring = i915_gem_request_get_ring(obj->last_read_req); >> if (ring == NULL || ring->id != RCS) >> ring = &dev_priv->ring[BCS]; >> } else { >> @@ -10043,7 +10043,8 @@ static int 
intel_crtc_page_flip(struct drm_crtc *crtc, >> goto cleanup_unpin; >> >> work->flip_queued_req = obj->last_write_req; >> - work->flip_queued_ring = obj->ring; >> + work->flip_queued_ring = >> + i915_gem_request_get_ring(obj->last_write_req); >> } else { >> ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, >> page_flip_flags); >> -- >> 1.7.9.5 >> >> _______________________________________________ >> Intel-gfx mailing list >> Intel-gfx@lists.freedesktop.org >> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
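For readers skimming the archive: "obj->lrr" above is shorthand for obj->last_read_req. Spelled out, the two guard styles John is comparing look like the fragment below; do_stuff() is only a placeholder for whatever the call site does with the request, not a real i915 function.

```c
/* John's preferred form: when the body consumes the request anyway, the
 * request pointer is the natural guard.
 */
if (obj->last_read_req)
	do_stuff(obj->last_read_req);

/* The alternative he is arguing against for such sites: guard on the
 * active flag but still dereference the request inside.
 */
if (obj->active)
	do_stuff(obj->last_read_req);
```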
On Tue, Oct 28, 2014 at 03:09:07PM +0000, John Harrison wrote:
> On 19/10/2014 15:12, Daniel Vetter wrote:
> >On Mon, Oct 06, 2014 at 03:15:25PM +0100, John.C.Harrison@Intel.com wrote:
> >>From: John Harrison <John.C.Harrison@Intel.com>
> >>
> >>For: VIZ-4377
> >>Signed-off-by: John.C.Harrison@Intel.com
> >I think this should be split up into the different parts:
> >
> >- s/obj->ring/obj->last_read_req->ring/ for all the cases that just want
> >  the current ring.
> >- s/obj->ring/obj->last_read_req/ I think in a bunch of places the code
> >  would actually be more readable if we'd check for obj->active instead.
> >- All the oddball special cases probably deserve their own commit + nice
> >  explanation in the commit message about why the change is correct.
>
> Can you explain which is which? As for why the change is correct, it is not
> a functional change. obj->ring was assigned with obj->last_read_seqno and
> cleared with obj->last_read_seqno. Thus querying obj->ring is equivalent to
> querying obj->last_read_req->ring in all cases. Given that the ring is now
> available from obj->lrr, it seemed redundant to also have it explicitly
> saved in obj->ring, hence the patch to remove it.
>
> AFAICT, there are two examples in debugfs that should just be querying
> obj->active. The rest are pretty much wanting the currently in use ring
> and/or are about to use last_read_req anyway. It seems more sensible to say
> 'if(obj->lrr) { do_stuff(obj->lrr); }' than 'if(obj->active) {
> do_stuff(obj->lrr); }'. Nothing looks particularly 'oddball' to me!

[snip]

> >>+	if (obj->last_read_req) /* XXX per-vma statistic */
> >>		stats->active += obj->base.size;

Stuff like this here is what I've meant. Checking for obj->active here is
better, and you might as well ditch the XXX comment too.

You're right that it's all semantically equivalent and your change here is
the less risky since there's no way behaviour can change. But code is mostly
written so that other humans can understand it (when debugging it), so
conveying as much meaning as possible is important. And this looks like a
good opportunity to review all the various places and make sure the code is
sane.

We have accumulated a bit of cruft from the per-vma rework and there's more
cruft on the horizon with potentially multiple read fences, so I think this
is worth it.
-Daniel
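A sketch (not a posted patch) of how the two debugfs sites split under this scheme: describe_obj() genuinely consumes the request to print the ring name, so the request-pointer guard stays, while per_file_stats() only wants "busy or not", so obj->active says it more directly and the XXX marker can be dropped as suggested.

```c
/* describe_obj(): the request is consumed, so guard on the request. */
if (obj->last_read_req != NULL)
	seq_printf(m, " (%s)",
		   i915_gem_request_get_ring(obj->last_read_req)->name);

/* per_file_stats(): only busyness matters, so guard on the flag. */
if (obj->active)
	stats->active += obj->base.size;
else
	stats->inactive += obj->base.size;
```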
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index df53515..b1d989f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -168,8 +168,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) *t = '\0'; seq_printf(m, " (%s mappable)", s); } - if (obj->ring != NULL) - seq_printf(m, " (%s)", obj->ring->name); + if (obj->last_read_req != NULL) + seq_printf(m, " (%s)", + i915_gem_request_get_ring(obj->last_read_req)->name); if (obj->frontbuffer_bits) seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } @@ -336,7 +337,7 @@ static int per_file_stats(int id, void *ptr, void *data) if (ppgtt->file_priv != stats->file_priv) continue; - if (obj->ring) /* XXX per-vma statistic */ + if (obj->last_read_req) /* XXX per-vma statistic */ stats->active += obj->base.size; else stats->inactive += obj->base.size; @@ -346,7 +347,7 @@ static int per_file_stats(int id, void *ptr, void *data) } else { if (i915_gem_obj_ggtt_bound(obj)) { stats->global += obj->base.size; - if (obj->ring) + if (obj->last_read_req) stats->active += obj->base.size; else stats->inactive += obj->base.size; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0790593..cdbbdeb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1859,8 +1859,6 @@ struct drm_i915_gem_object { void *dma_buf_vmapping; int vmapping_count; - struct intel_engine_cs *ring; - /** Breadcrumb of last rendering to the buffer. */ struct drm_i915_gem_request *last_read_req; struct drm_i915_gem_request *last_write_req; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d40dad7..561fb81 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2184,14 +2184,18 @@ static void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, struct intel_engine_cs *ring) { - struct drm_i915_gem_request *req = intel_ring_get_request(ring); + struct drm_i915_gem_request *req; + struct intel_engine_cs *old_ring; BUG_ON(ring == NULL); - if (obj->ring != ring && obj->last_write_req) { + + req = intel_ring_get_request(ring); + old_ring = i915_gem_request_get_ring(obj->last_read_req); + + if (old_ring != ring && obj->last_write_req) { /* Keep the request relative to the current ring */ obj->last_write_req = req; } - obj->ring = ring; /* Add a reference if we're newly entering the active list. 
*/ if (!obj->active) { @@ -2230,7 +2234,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) intel_fb_obj_flush(obj, true); list_del_init(&obj->ring_list); - obj->ring = NULL; obj->last_read_req = NULL; obj->last_write_req = NULL; @@ -2247,9 +2250,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) static void i915_gem_object_retire(struct drm_i915_gem_object *obj) { - struct intel_engine_cs *ring = obj->ring; - - if (ring == NULL) + if (obj->last_read_req == NULL) return; if (i915_gem_request_completed(obj->last_read_req, true)) @@ -2769,14 +2770,17 @@ i915_gem_idle_work_handler(struct work_struct *work) static int i915_gem_object_flush_active(struct drm_i915_gem_object *obj) { + struct intel_engine_cs *ring; int ret; if (obj->active) { + ring = i915_gem_request_get_ring(obj->last_read_req); + ret = i915_gem_check_olr(obj->last_read_req); if (ret) return ret; - i915_gem_retire_requests_ring(obj->ring); + i915_gem_retire_requests_ring(ring); } return 0; @@ -2876,9 +2880,11 @@ int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_engine_cs *to) { - struct intel_engine_cs *from = obj->ring; + struct intel_engine_cs *from; int ret, idx; + from = i915_gem_request_get_ring(obj->last_read_req); + if (from == NULL || to == from) return 0; @@ -3889,7 +3895,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, bool was_pin_display; int ret; - if (pipelined != obj->ring) { + if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) { ret = i915_gem_object_sync(obj, pipelined); if (ret) return ret; @@ -4303,9 +4309,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, ret = i915_gem_object_flush_active(obj); args->busy = obj->active; - if (obj->ring) { + if (obj->last_read_req) { + struct intel_engine_cs *ring; BUILD_BUG_ON(I915_NUM_RINGS > 16); - args->busy |= intel_ring_flag(obj->ring) << 16; + ring = i915_gem_request_get_ring(obj->last_read_req); + args->busy |= intel_ring_flag(ring) << 16; } drm_gem_object_unreference(&obj->base); diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index a5221d8..8f24831 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -613,7 +613,17 @@ static int do_switch(struct intel_engine_cs *ring, * swapped, but there is no way to do that yet. */ from->legacy_hw_ctx.rcs_state->dirty = 1; - BUG_ON(from->legacy_hw_ctx.rcs_state->ring != ring); + + /* BUG_ON(i915_gem_request_get_ring( + from->legacy_hw_ctx.rcs_state->last_read_req) != ring); */ + /* NB: last_read_req has already been updated to the current + * request however, that request has not yet been submitted. + * Thus last_read_req->ring is guaranteed to be null! + * NB2: Doing the check before the update of last_read_req + * (which happens in i915_vma_move_to_active() just above), + * also fails because last_read_req is almost always null on + * entry! + */ /* obj is kept alive until the next request by its active ref */ i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 9545d96..b9ecbd9 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -681,7 +681,8 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->dirty = obj->dirty; err->purgeable = obj->madv != I915_MADV_WILLNEED; err->userptr = obj->userptr.mm != NULL; - err->ring = obj->ring ? 
obj->ring->id : -1; + err->ring = obj->last_read_req ? + i915_gem_request_get_ring(obj->last_read_req)->id : -1; err->cache_level = obj->cache_level; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 9ca8f94..8238aac 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9728,7 +9728,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, else if (i915.enable_execlists) return true; else - return ring != obj->ring; + return ring != i915_gem_request_get_ring(obj->last_read_req); } static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) @@ -9769,8 +9769,6 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj) if (!obj->last_write_req) return 0; - ring = obj->ring; - if (i915_gem_request_completed(obj->last_write_req, true)) return 0; @@ -9778,6 +9776,7 @@ static int intel_postpone_flip(struct drm_i915_gem_object *obj) if (ret) return ret; + ring = i915_gem_request_get_ring(obj->last_write_req); if (WARN_ON(!ring->irq_get(ring))) return 0; @@ -9837,14 +9836,15 @@ static int intel_queue_mmio_flip(struct drm_device *dev, spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); intel_crtc->mmio_flip.req = obj->last_write_req; i915_gem_request_reference(intel_crtc->mmio_flip.req); - intel_crtc->mmio_flip.ring_id = obj->ring->id; + BUG_ON(ring != i915_gem_request_get_ring(intel_crtc->mmio_flip.req)); + intel_crtc->mmio_flip.ring_id = ring->id; spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); /* * Double check to catch cases where irq fired before * mmio flip data was ready */ - intel_notify_mmio_flip(obj->ring); + intel_notify_mmio_flip(ring); return 0; } @@ -10022,7 +10022,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, } else if (IS_IVYBRIDGE(dev)) { ring = &dev_priv->ring[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { - ring = obj->ring; + ring = i915_gem_request_get_ring(obj->last_read_req); if (ring == NULL || ring->id != RCS) ring = &dev_priv->ring[BCS]; } else { @@ -10043,7 +10043,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_unpin; work->flip_queued_req = obj->last_write_req; - work->flip_queued_ring = obj->ring; + work->flip_queued_ring = + i915_gem_request_get_ring(obj->last_write_req); } else { ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, page_flip_flags);