@@ -1738,9 +1738,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
bool i915_gem_object_is_active(struct drm_i915_gem_object *obj);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- struct intel_ring_buffer *ring);
+void i915_gem_vma_move_to_active(struct i915_vma *vma,
+ struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1879,14 +1879,13 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
}
void
-i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
- struct i915_address_space *vm,
- struct intel_ring_buffer *ring)
+i915_gem_vma_move_to_active(struct i915_vma *vma,
+ struct intel_ring_buffer *ring)
{
- struct drm_device *dev = obj->base.dev;
+ struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno = intel_ring_get_seqno(ring);
- struct i915_vma *vma;
+ struct drm_i915_gem_object *obj = vma->obj;
BUG_ON(ring == NULL);
if (obj->ring != ring && obj->last_write_seqno) {
@@ -1895,15 +1894,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
}
obj->ring = ring;
- /* Move from whatever list we were on to the tail of execution. */
- vma = i915_gem_obj_to_vma(obj, vm);
/* Add a reference if we're newly entering the active list. */
if (!vma->active) {
drm_gem_object_reference(&obj->base);
vma->active = 1;
}
- list_move_tail(&vma->mm_list, &vm->active_list);
+ /* Move from whatever list we were on to the tail of execution. */
+ list_move_tail(&vma->mm_list, &vma->vm->active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
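(For readability, this is roughly how the renamed helper reads once the two i915_gem.c hunks above are applied. It is reconstructed from the diff alone; lines that fall outside the hunks are elided with placeholder comments rather than reproduced.)

void
i915_gem_vma_move_to_active(struct i915_vma *vma,
			    struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno = intel_ring_get_seqno(ring);
	struct drm_i915_gem_object *obj = vma->obj;

	BUG_ON(ring == NULL);
	if (obj->ring != ring && obj->last_write_seqno) {
		/* body unchanged; falls between the two hunks above */
	}
	obj->ring = ring;

	/* Add a reference if we're newly entering the active list. */
	if (!vma->active) {
		drm_gem_object_reference(&obj->base);
		vma->active = 1;
	}

	/* Move from whatever list we were on to the tail of execution. */
	list_move_tail(&vma->mm_list, &vma->vm->active_list);
	list_move_tail(&obj->ring_list, &ring->active_list);

	obj->last_read_seqno = seqno;
	/* remainder of the function is unchanged and not part of this diff */
}

The shape of the rework is visible here: the helper no longer takes an (object, address space) pair and looks the VMA up itself; it takes the VMA directly and derives both the object (vma->obj) and the target address space (vma->vm) from it, while the device pointer now comes from the ring.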
@@ -441,9 +441,10 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
+ struct i915_vma *vma =
+ i915_gem_obj_to_vma(from->obj, &dev_priv->gtt.base);
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
- i915_gem_object_move_to_active(from->obj, &dev_priv->gtt.base,
- ring);
+ i915_gem_vma_move_to_active(vma, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
@@ -798,7 +798,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
- i915_gem_object_move_to_active(obj, vma->vm, ring);
+ i915_gem_vma_move_to_active(vma, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
obj->last_write_seqno = intel_ring_get_seqno(ring);
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            |  5 ++---
 drivers/gpu/drm/i915/i915_gem.c            | 14 ++++++--------
 drivers/gpu/drm/i915/i915_gem_context.c    |  5 +++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 4 files changed, 12 insertions(+), 14 deletions(-)
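A note on the caller side: where the old interface was handed an object plus an address space, the new one wants the VMA itself. Callers that already iterate VMAs, such as the execbuffer path above, simply pass the VMA through; callers that only hold the object, such as do_switch(), resolve it first. A minimal before/after sketch of that pattern, using only the names that appear in the hunks above (handling of a missing/unbound VMA is omitted):

	/* before: the helper resolved the VMA from (obj, vm) internally */
	i915_gem_object_move_to_active(from->obj, &dev_priv->gtt.base, ring);

	/* after: resolve the global-GTT VMA once, then operate on it directly */
	struct i915_vma *vma =
		i915_gem_obj_to_vma(from->obj, &dev_priv->gtt.base);
	i915_gem_vma_move_to_active(vma, ring);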