@@ -2815,9 +2815,12 @@ int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 int __i915_add_request(struct intel_engine_cs *ring,
 		       struct drm_file *file,
-		       struct drm_i915_gem_object *batch_obj);
+		       struct drm_i915_gem_object *batch_obj,
+		       bool flush_caches);
 #define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+	__i915_add_request(ring, NULL, NULL, true)
+#define i915_add_request_no_flush(ring) \
+	__i915_add_request(ring, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
@@ -2408,7 +2408,8 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
 
 int __i915_add_request(struct intel_engine_cs *ring,
 		       struct drm_file *file,
-		       struct drm_i915_gem_object *obj)
+		       struct drm_i915_gem_object *obj,
+		       bool flush_caches)
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct drm_i915_gem_request *request;
@@ -2433,12 +2434,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 * is that the flush _must_ happen before the next request, no matter
 	 * what.
 	 */
-	if (i915.enable_execlists) {
-		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
-		if (ret)
-			return ret;
-	} else {
-		ret = intel_ring_flush_all_caches(ring);
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
+		else
+			ret = intel_ring_flush_all_caches(ring);
 		if (ret)
 			return ret;
 	}
@@ -2450,15 +2450,12 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	 */
 	request->postfix = intel_ring_get_tail(ringbuf);
 
-	if (i915.enable_execlists) {
+	if (i915.enable_execlists)
 		ret = ring->emit_request(ringbuf, request);
-		if (ret)
-			return ret;
-	} else {
+	else
 		ret = ring->add_request(ring);
-		if (ret)
-			return ret;
-	}
+	if (ret)
+		return ret;
 
 	request->head = request_start;
 	request->tail = intel_ring_get_tail(ringbuf);
@@ -997,7 +997,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 
 	/* Add a breadcrumb for the completion of the batch buffer */
 	return __i915_add_request(params->ring, params->file,
-				  params->batch_obj);
+				  params->batch_obj, true);
 }
 
 static int
@@ -173,7 +173,7 @@ int i915_gem_render_state_init(struct intel_engine_cs *ring)
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	ret = __i915_add_request(ring, NULL, so.obj);
+	ret = __i915_add_request(ring, NULL, so.obj, true);
 	/* __i915_add_request moves object to inactive if it fails */
 out:
 	i915_gem_render_state_fini(&so);
@@ -1612,7 +1612,7 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
 
-	ret = __i915_add_request(ring, file, so.obj);
+	ret = __i915_add_request(ring, file, so.obj, true);
 	/* intel_logical_ring_add_request moves object to inactive if it
 	 * fails */
 out:
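
Illustrative caller sketch, not part of the patch: it only shows how the two
wrapper macros added above expand, assuming the updated i915_drv.h is in scope;
the function name example_close_request is hypothetical.

/*
 * Sketch: a caller that wants the default behaviour uses i915_add_request(),
 * which passes flush_caches = true; a caller that has already emitted its own
 * cache flush can use i915_add_request_no_flush() to skip the implicit one.
 */
static int example_close_request(struct intel_engine_cs *ring)
{
	int ret;

	/* Default path: flush caches, then emit the request breadcrumb. */
	ret = i915_add_request(ring);	/* __i915_add_request(ring, NULL, NULL, true) */
	if (ret)
		return ret;

	/* Caller-managed flush path: skip the implicit cache flush. */
	return i915_add_request_no_flush(ring);	/* __i915_add_request(ring, NULL, NULL, false) */
}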