| Message ID | 1375315222-4785-27-git-send-email-ben@bwidawsk.net (mailing list archive) |
| --- | --- |
| State | New, archived |
On Wed, Jul 31, 2013 at 05:00:19PM -0700, Ben Widawsky wrote:
> Even though we track object activeness and not VMA, because we have the
> active_list be based on the VM, it makes the most sense to use VMAs in
> the APIs.
>
> NOTE: Daniel intends to eventually rip out active/inactive LRUs, but for
> now, leave them be.
>
> Signed-off-by: Ben Widawsky <ben@bwidawsk.net>

Ah, here's the patch that addresses two of my earlier questions. I guess
the split was due to the execbuf conversion that needed to happen first.

Looks good.
-Daniel

> ---
>  drivers/gpu/drm/i915/i915_drv.h            |  5 ++---
>  drivers/gpu/drm/i915/i915_gem.c            | 11 +++++++++--
>  drivers/gpu/drm/i915/i915_gem_context.c    |  8 ++++----
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4 +---
>  4 files changed, 16 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index ee5164e..695f1e5 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1735,9 +1735,8 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
>  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
>  int i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  			 struct intel_ring_buffer *to);
> -void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
> -				    struct intel_ring_buffer *ring);
> -
> +void i915_vma_move_to_active(struct i915_vma *vma,
> +			     struct intel_ring_buffer *ring);
>  int i915_gem_dumb_create(struct drm_file *file_priv,
>  			 struct drm_device *dev,
>  			 struct drm_mode_create_dumb *args);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index a4ba819..24c1a91 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1866,11 +1866,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
>  	return 0;
>  }
>
> -void
> +static void
>  i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
>  			       struct intel_ring_buffer *ring)
>  {
> -	struct drm_device *dev = obj->base.dev;
> +	struct drm_device *dev = ring->dev;
>  	struct drm_i915_private *dev_priv = dev->dev_private;
>  	u32 seqno = intel_ring_get_seqno(ring);
>
> @@ -1905,6 +1905,13 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
>  	}
>  }
>
> +void i915_vma_move_to_active(struct i915_vma *vma,
> +			     struct intel_ring_buffer *ring)
> +{
> +	list_move_tail(&vma->mm_list, &vma->vm->active_list);
> +	return i915_gem_object_move_to_active(vma->obj, ring);
> +}
> +
>  static void
>  i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
>  {
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 88b0f52..147399c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -436,11 +436,11 @@ static int do_switch(struct i915_hw_context *to)
>  	 * MI_SET_CONTEXT instead of when the next seqno has completed.
>  	 */
>  	if (from != NULL) {
> -		struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
> -		struct i915_address_space *ggtt = &dev_priv->gtt.base;
> +		struct drm_i915_private *dev_priv = ring->dev->dev_private;
> +		struct i915_vma *vma =
> +			i915_gem_obj_to_vma(from->obj, &dev_priv->gtt.base);
>  		from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
> -		list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
> -		i915_gem_object_move_to_active(from->obj, ring);
> +		i915_vma_move_to_active(vma, ring);
>  		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
>  		 * whole damn pipeline, we don't need to explicitly mark the
>  		 * object dirty. The only exception is that the context must be
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 1c9d504..b8bb7f5 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -833,9 +833,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
>  		obj->base.read_domains = obj->base.pending_read_domains;
>  		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
>
> -		/* FIXME: This lookup gets fixed later <-- danvet */
> -		list_move_tail(&vma->mm_list, &vma->vm->active_list);
> -		i915_gem_object_move_to_active(obj, ring);
> +		i915_vma_move_to_active(vma, ring);
>  		if (obj->base.write_domain) {
>  			obj->dirty = 1;
>  			obj->last_write_seqno = intel_ring_get_seqno(ring);
> --
> 1.8.3.4
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx
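For readers skimming the diff, here is a minimal sketch of the data-structure relationship the commit message leans on. The field names (`obj`, `vm`, `mm_list`, `active_list`) match the i915 structures this patch touches; everything else is stubbed for illustration and is not the driver's full definition. Because one GEM object can be bound into several address spaces, each binding gets its own VMA carrying its own list node, which is why a per-VM active list has to be driven through the VMA rather than the object:

```c
#include <linux/list.h>

struct drm_i915_gem_object;              /* backing GEM object, opaque here */

/* Per-VM address space: each VM keeps its own LRU of bindings. */
struct i915_address_space {
	struct list_head active_list;    /* i915_vma.mm_list nodes, busy on GPU */
	struct list_head inactive_list; /* bound but idle bindings */
	/* ...real definition in i915_drv.h... */
};

/* One binding of an object into one VM; an object bound into N VMs
 * has N of these, each with its own mm_list node. */
struct i915_vma {
	struct drm_i915_gem_object *obj; /* backing object, possibly shared */
	struct i915_address_space *vm;   /* the VM this binding lives in */
	struct list_head mm_list;        /* node on vm->active/inactive list */
	/* ...real definition in i915_drv.h... */
};
```

With the list node owned by the binding, `i915_vma_move_to_active()` can pair the `list_move_tail()` onto `vma->vm->active_list` with the per-object seqno bookkeeping in one helper, which is what both call sites above are converted to use.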