@@ -697,6 +697,13 @@ struct intel_context {
struct list_head link;
};
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+};
+
struct i915_fbc {
unsigned long size;
unsigned threshold;
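
For context, the new enum tells consumers of the frontbuffer tracking what kind of access triggered an invalidate or flush. A purely hypothetical sketch (example_should_update_fbc is not part of this patch or the driver) of how a consumer such as FBC could key off it:

/* Hypothetical helper, for illustration only: decide whether a
 * frontbuffer event should poke FBC, based on its origin.
 */
static bool example_should_update_fbc(enum fb_op_origin origin)
{
	switch (origin) {
	case ORIGIN_GTT:
	case ORIGIN_CPU:
	case ORIGIN_CS:
		/* actual writes to the scanout buffer */
		return true;
	case ORIGIN_FLIP:
		/* flips are already handled via the flip_prepare/complete path */
		return false;
	}

	return false;
}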
@@ -2318,7 +2318,7 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
}
- intel_fb_obj_flush(obj, true);
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
list_del_init(&obj->ring_list);
@@ -3676,7 +3676,7 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -3699,7 +3699,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
@@ -3756,7 +3756,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -4086,7 +4086,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
}
if (write)
- intel_fb_obj_invalidate(obj, NULL);
+ intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
old_read_domains,
@@ -968,7 +968,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
obj->dirty = 1;
i915_gem_request_assign(&obj->last_write_req, req);
- intel_fb_obj_invalidate(obj, ring);
+ intel_fb_obj_invalidate(obj, ring, ORIGIN_CS);
/* update for the implicit flush after a batch */
obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
@@ -841,13 +841,15 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
/* intel_frontbuffer.c */
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring);
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin);
void intel_frontbuffer_flip_prepare(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_device *dev,
unsigned frontbuffer_bits);
void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits);
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
/**
* intel_frontbuffer_flip - synchronous frontbuffer flip
* @dev: DRM device
@@ -863,10 +865,11 @@ static inline
void intel_frontbuffer_flip(struct drm_device *dev,
unsigned frontbuffer_bits)
{
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+ enum fb_op_origin origin);
/* intel_audio.c */
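
With the updated prototypes, every caller states how it touched the object. A minimal usage sketch, assuming a synchronous CPU write to a scanout object (the surrounding function is illustrative, not from this patch):

/* Illustrative only: bracket a CPU write to a frontbuffer object. */
static void example_cpu_write(struct drm_i915_gem_object *obj)
{
	/* rendering is about to start; frontbuffer caching must be invalidated */
	intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU);

	/* ... write to the object's backing storage through the CPU ... */

	/* the write has completed; caching may resume (not a retire) */
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
}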
@@ -127,6 +127,7 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* intel_fb_obj_invalidate - invalidate frontbuffer object
* @obj: GEM object to invalidate
* @ring: set for asynchronous rendering
+ * @origin: which operation caused the invalidation
*
* This function gets called every time rendering on the given object starts and
* frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
@@ -135,7 +136,8 @@ static void intel_mark_fb_busy(struct drm_device *dev,
* scheduled.
*/
void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
- struct intel_engine_cs *ring)
+ struct intel_engine_cs *ring,
+ enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -163,6 +165,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
* intel_frontbuffer_flush - flush frontbuffer
* @dev: DRM device
* @frontbuffer_bits: frontbuffer plane tracking bits
+ * @origin: which operation caused the flush
*
* This function gets called every time rendering on the given planes has
* completed and frontbuffer caching can be started again. Flushes will get
@@ -171,7 +174,8 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
* Can be called without any locks held.
*/
void intel_frontbuffer_flush(struct drm_device *dev,
- unsigned frontbuffer_bits)
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -199,13 +203,14 @@ void intel_frontbuffer_flush(struct drm_device *dev,
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
* @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
* completed and frontbuffer caching can be started again. If @retire is true
* then any delayed flushes will be unblocked.
*/
void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire)
+ bool retire, enum fb_op_origin origin)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -227,7 +232,7 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
mutex_unlock(&dev_priv->fb_tracking.lock);
}
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
}
/**
@@ -275,5 +280,5 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
mutex_unlock(&dev_priv->fb_tracking.lock);
- intel_frontbuffer_flush(dev, frontbuffer_bits);
+ intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
}
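
Both flip helpers now report ORIGIN_FLIP when they flush, so downstream code can tell a plane flip apart from a rendering flush. A rough sketch of the sequence they bracket (the wrapper function here is hypothetical):

/* Hypothetical wrapper showing the intended pairing of the flip helpers. */
static void example_flip(struct drm_device *dev, unsigned frontbuffer_bits)
{
	/* block delayed flushes while the flip is in flight */
	intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

	/* ... submit the flip and wait for it to complete ... */

	/* unblocks the bits and flushes with ORIGIN_FLIP */
	intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
}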