@@ -2558,6 +2558,9 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
+void i915_gem_vma_bind(struct i915_vma *vma,
+		       enum i915_cache_level cache_level, unsigned flags);
+void i915_gem_vma_unbind(struct i915_vma *vma);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
struct i915_vma *
@@ -2989,7 +2989,7 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
- vma->unbind_vma(vma);
+ i915_gem_vma_unbind(vma);
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm))
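[Note: bind_vma and unbind_vma are per-VMA function pointers — the diff itself dereferences both — installed at VMA creation time, plausibly in __i915_gem_vma_create, the function visible at the end of this patch. A minimal sketch of the indirection the new wrappers funnel; only the bind_vma/unbind_vma member names are taken from the driver, the rest is illustrative:

	/* Sketch only: the per-VMA vfunc pair that i915_gem_vma_bind() and
	 * i915_gem_vma_unbind() now wrap.  Types are simplified; only the
	 * bind_vma/unbind_vma member names come from the driver. */
	struct i915_vma_sketch {
		/* ... other i915_vma members omitted ... */
		void (*bind_vma)(struct i915_vma_sketch *vma,
				 unsigned int cache_level, unsigned int flags);
		void (*unbind_vma)(struct i915_vma_sketch *vma);
	};
]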
@@ -3509,8 +3509,8 @@ search_free:
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
trace_i915_vma_bind(vma, flags);
- vma->bind_vma(vma, obj->cache_level,
- flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+ i915_gem_vma_bind(vma, obj->cache_level,
+ flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
return vma;
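[Note: at the bind sites the ternary translates pin-API flags into bind flags — a mapping must land in the global GTT either when the caller asked for it outright (PIN_GLOBAL) or when the object has to stay CPU-mappable through the aperture (PIN_MAPPABLE). A hypothetical helper, not part of this patch, makes the translation explicit:

	/* Hypothetical helper (illustration only): the PIN_* -> GLOBAL_BIND
	 * translation performed inline at the bind call sites. */
	static inline unsigned int pin_to_bind_flags(unsigned int pin_flags)
	{
		return (pin_flags & (PIN_MAPPABLE | PIN_GLOBAL)) ? GLOBAL_BIND : 0;
	}
]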
@@ -3717,8 +3717,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
list_for_each_entry(vma, &obj->vma_list, vma_link)
if (drm_mm_node_allocated(&vma->node))
- vma->bind_vma(vma, cache_level,
- obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+ i915_gem_vma_bind(vma, cache_level,
+ obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
}
list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -4115,7 +4115,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
}
if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
vma->pin_count++;
if (flags & PIN_MAPPABLE)
@@ -581,7 +581,7 @@ static int do_switch(struct intel_engine_cs *ring,
if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
&dev_priv->gtt.base);
- vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
}

/* GEN8 does *not* require an explicit reload if the PDPs have been
@@ -361,7 +361,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct i915_vma *vma =
list_first_entry(&target_i915_obj->vma_list,
typeof(*vma), vma_link);
- vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, target_i915_obj->cache_level,
+ GLOBAL_BIND);
}

/* Validate that the target is in a valid r/w GPU domain */
@@ -1337,7 +1337,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
* without telling our object about it. So we need to fake it.
*/
obj->has_global_gtt_mapping = 0;
- vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
+ i915_gem_vma_bind(vma, obj->cache_level, GLOBAL_BIND);
}
@@ -2134,6 +2134,17 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
}

+void i915_gem_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
+ unsigned flags)
+{
+ vma->bind_vma(vma, cache_level, flags);
+}
+
+void i915_gem_vma_unbind(struct i915_vma *vma)
+{
+ vma->unbind_vma(vma);
+}
+
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
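[Note: the new wrappers are deliberately thin; the apparent point is to give every bind and unbind a single choke point instead of an open-coded pointer dereference at each call site. As a sketch of what that enables — the tracepoint below is hypothetical, not something this patch adds — common instrumentation or sanity checks can later live in one place:

	/* Sketch only: a possible future elaboration of the wrapper.  The
	 * trace_i915_gem_vma_bind() tracepoint is hypothetical, shown to
	 * illustrate the single-choke-point rationale. */
	void i915_gem_vma_bind(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       unsigned flags)
	{
		trace_i915_gem_vma_bind(vma, cache_level, flags); /* hypothetical */
		vma->bind_vma(vma, cache_level, flags);
	}
]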