@@ -1190,9 +1190,13 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
@@ -2831,7 +2831,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return ret;
}
- ret = i915_gem_gtt_bind_object(obj);
+ ret = i915_gem_gtt_bind_object(obj, obj->cache_level);
if (ret) {
i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj->gtt_space);
@@ -3002,6 +3002,39 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->gtt_space) {
+ ret = i915_gem_object_flush_gpu(obj);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_gtt_bind_object(obj, cache_level);
+ if (ret)
+ return ret;
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain. Just set it to the CPU cache
+ * for now.
+ */
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ obj->cache_level = cache_level;
+ return 0;
+}
+
/*
* Prepare buffer for display plane. Use uninterruptible for possible flush
* wait, as in modesetting process we're not supposed to be interrupted.
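[Note] The new setter is the single path that changes obj->cache_level: it flushes
outstanding GPU writes and rewrites the GTT mapping at the new level (when the
object is bound) before recording the change on the object. A hypothetical caller
would look like the sketch below; the function name example_make_scanout_uncached
is illustrative only and not part of this patch:

	/* Illustrative only: drop a buffer to uncached before scanout.
	 * i915_gem_object_set_cache_level() flushes pending GPU writes and
	 * rebinds the GTT entries if the object is currently bound, so the
	 * caller only needs to check the return value.
	 */
	static int example_make_scanout_uncached(struct drm_i915_gem_object *obj)
	{
		return i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
	}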
@@ -70,10 +70,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
intel_gtt_chipset_flush();
}
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t agp_type = cache_level_to_agp_type(cache_level);
int ret;
if (dev_priv->mm.gtt->needs_dmar) {
@@ -87,12 +89,12 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
intel_gtt_insert_sg_entries(obj->sg_list,
obj->num_sg,
obj->gtt_space->start >> PAGE_SHIFT,
- cache_level_to_agp_type(obj->cache_level));
+ agp_type);
} else
intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT,
obj->pages,
- cache_level_to_agp_type(obj->cache_level));
+ agp_type);
return 0;
}
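[Note] Hoisting cache_level_to_agp_type() into the local agp_type does the
translation once per bind and, more importantly, drives it from the
caller-supplied cache_level rather than obj->cache_level; that is what lets
i915_gem_object_set_cache_level() rebind at the new level before updating the
object. For reference, the pre-existing helper in i915_gem_gtt.c maps cache
levels onto AGP memory types roughly as in this sketch (assumed shape, not part
of this diff):

	/* Sketch of the existing translation helper (assumed shape):
	 * cache levels map onto the AGP memory types used for GTT PTEs.
	 */
	static unsigned int cache_level_to_agp_type(enum i915_cache_level cache_level)
	{
		switch (cache_level) {
		case I915_CACHE_LLC:
			return AGP_USER_CACHED_MEMORY;	/* snooped, LLC cached */
		case I915_CACHE_NONE:
		default:
			return AGP_USER_MEMORY;		/* uncached */
		}
	}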
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
@@ -759,7 +760,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
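[Note] Both ring-init call sites deliberately ignore the setter's return value:
the object was just allocated and is not yet bound, so the gtt_space branch
cannot run and the call cannot fail. A caller operating on a possibly-bound
object should check it; a sketch reusing the existing err label of these
functions:

	/* Sketch only: defensive variant for a possibly-bound object. */
	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;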