--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2643,7 +2643,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
u32 *seqno);
#define i915_add_request(ring, seqno) \
__i915_add_request(ring, NULL, NULL, seqno)
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
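For reference, the req-to-ring and req-to-seqno conversions used throughout
this patch are assumed to be trivial inline accessors along these lines (a
sketch of the i915_drv.h helpers, not part of this diff):

    static inline struct intel_engine_cs *
    i915_gem_request_get_ring(struct drm_i915_gem_request *req)
    {
            return req ? req->ring : NULL;
    }

    static inline uint32_t
    i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
    {
            return req ? req->seqno : 0;
    }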
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1214,10 +1214,9 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
}
/**
- * __i915_wait_seqno - wait until execution of seqno has finished
- * @ring: the ring expected to report seqno
- * @seqno: duh!
- * @reset_counter: reset sequence associated with the given seqno
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1228,15 +1227,16 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
- * Returns 0 if the seqno was found within the alloted time. Else returns the
+ * Returns 0 if the request was completed within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct drm_i915_file_private *file_priv)
{
+ struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
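The reset_counter contract described in the kernel-doc above translates into
the calling pattern used by the converted call sites later in this patch; a
minimal sketch, reusing the names from those hunks:

    /* Sample the reset counter while struct_mutex is still held, take a
     * reference so the request survives the unlocked wait, then wait
     * without holding the lock. */
    reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
    i915_gem_request_reference(req);
    mutex_unlock(&dev->struct_mutex);
    ret = __i915_wait_request(req, reset_counter, true, NULL, NULL);
    mutex_lock(&dev->struct_mutex);
    i915_gem_request_unreference(req);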
@@ -1248,7 +1248,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
- if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
+ if (i915_seqno_passed(ring->get_seqno(ring, true),
+ i915_gem_request_get_seqno(req)))
return 0;
timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
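The completion check above still reduces to a raw seqno comparison;
i915_seqno_passed() is assumed to be the usual wrap-safe helper, roughly:

    static inline bool
    i915_seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            /* Signed subtraction tolerates 32-bit seqno wraparound. */
            return (int32_t)(seq1 - seq2) >= 0;
    }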
@@ -1265,7 +1266,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
return -ENODEV;
/* Record current time in case interrupted by signal, or wedged */
- trace_i915_gem_request_wait_begin(ring, seqno);
+ trace_i915_gem_request_wait_begin(i915_gem_request_get_ring(req),
+ i915_gem_request_get_seqno(req));
before = ktime_get_raw_ns();
for (;;) {
struct timer_list timer;
@@ -1284,7 +1286,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
break;
}
- if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
+ if (i915_seqno_passed(ring->get_seqno(ring, false),
+ i915_gem_request_get_seqno(req))) {
ret = 0;
break;
}
@@ -1316,7 +1319,8 @@ int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
}
}
now = ktime_get_raw_ns();
- trace_i915_gem_request_wait_end(ring, seqno);
+ trace_i915_gem_request_wait_end(i915_gem_request_get_ring(req),
+ i915_gem_request_get_seqno(req));
if (!irq_test_in_progress)
ring->irq_put(ring);
@@ -1362,8 +1366,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- return __i915_wait_seqno(req->ring, i915_gem_request_get_seqno(req),
- reset_counter, interruptible, NULL, NULL);
+ return __i915_wait_request(req, reset_counter,
+ interruptible, NULL, NULL);
}
static int
@@ -1417,7 +1421,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_i915_gem_request *req;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_engine_cs *ring = obj->ring;
unsigned reset_counter;
int ret;
@@ -1439,8 +1442,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_seqno(ring, i915_gem_request_get_seqno(req),
- reset_counter, true, NULL, file_priv);
+ ret = __i915_wait_request(req, reset_counter, true, NULL, file_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_request_unreference(req);
if (ret)
@@ -2930,9 +2932,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_request *req;
- struct intel_engine_cs *ring = NULL;
unsigned reset_counter;
- u32 seqno = 0;
int ret = 0;
if (args->flags != 0)
@@ -2957,9 +2957,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
goto out;
req = obj->last_read_req;
- seqno = i915_gem_request_get_seqno(req);
- WARN_ON(seqno == 0);
- ring = obj->ring;
/* Do this after OLR check to make sure we make forward progress polling
* on this IOCTL with a timeout <=0 (like busy ioctl)
@@ -2974,8 +2971,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
i915_gem_request_reference(req);
mutex_unlock(&dev->struct_mutex);
- ret = __i915_wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
- file->driver_priv);
+ ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns,
+ file->driver_priv);
mutex_lock(&dev->struct_mutex);
i915_gem_request_unreference(req);
mutex_unlock(&dev->struct_mutex);
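The wait ioctl is the one caller that exercises the in/out timeout described
in the kernel-doc; a hypothetical userspace illustration (not part of this
patch) of how the timeout_ns field is used in both directions:

    struct drm_i915_gem_wait wait = {
            .bo_handle = handle,             /* GEM handle to wait on */
            .timeout_ns = 500 * 1000 * 1000, /* in: wait up to 500 ms */
    };
    /* On success, timeout_ns is rewritten with the time remaining;
     * -ETIME means the request did not complete within the timeout. */
    ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);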
@@ -4144,9 +4141,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_seqno(i915_gem_request_get_ring(target),
- i915_gem_request_get_seqno(target),
- reset_counter, true, NULL, NULL);
+ ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9432,10 +9432,9 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
mmio_flip = &intel_crtc->mmio_flip;
if (mmio_flip->req)
- WARN_ON(__i915_wait_seqno(i915_gem_request_get_ring(mmio_flip->req),
- i915_gem_request_get_seqno(mmio_flip->req),
- intel_crtc->reset_counter,
- false, NULL, NULL) != 0);
+ WARN_ON(__i915_wait_request(mmio_flip->req,
+ intel_crtc->reset_counter,
+ false, NULL, NULL) != 0);
intel_do_mmio_flip(intel_crtc);
if (mmio_flip->req)
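For the MMIO flip path, intel_crtc->mmio_flip is assumed after this change to
carry the request directly rather than a raw ring/seqno pair; a hypothetical
minimal sketch, covering only the member these hunks dereference:

    struct intel_mmio_flip {
            struct drm_i915_gem_request *req; /* flip completion waits on this */
    };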