--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3350,8 +3350,13 @@ void __i915_add_request(struct drm_i915_gem_request *req,
__i915_add_request(req, NULL, true)
#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
+
+/* flags used by callers of __i915_wait_request() */
+#define I915_WAIT_REQUEST_INTERRUPTIBLE (1 << 0)
+#define I915_WAIT_REQUEST_LOCKED (1 << 1)
+
int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
+ u32 flags,
s64 *timeout,
struct intel_rps_client *rps);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
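
With this change a caller encodes both the wait mode and its locking state in one
bitmask. As a minimal sketch of the intended usage (illustrative variables, not part
of the patch itself), a caller holding struct_mutex builds its flags the same way the
converted call sites below do:

	u32 flags = I915_WAIT_REQUEST_LOCKED;	/* caller holds struct_mutex */

	if (dev_priv->mm.interruptible)
		flags |= I915_WAIT_REQUEST_INTERRUPTIBLE;

	ret = __i915_wait_request(req, flags, NULL, NULL);
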
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1455,7 +1455,9 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
/**
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
+ * @flags: flags controlling the nature of the wait:
+ *    I915_WAIT_REQUEST_INTERRUPTIBLE - do an interruptible wait (normally yes)
+ *    I915_WAIT_REQUEST_LOCKED - caller is holding struct_mutex
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
* @rps: RPS client
*
@@ -1470,7 +1472,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
+ u32 flags,
s64 *timeout,
struct intel_rps_client *rps)
{
@@ -1478,6 +1480,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct drm_i915_private *dev_priv = req->i915;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
+ bool interruptible = flags & I915_WAIT_REQUEST_INTERRUPTIBLE;
+ bool locked = flags & I915_WAIT_REQUEST_LOCKED;
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
@@ -1543,7 +1547,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
reset_pending = i915_engine_reset_pending(&dev_priv->gpu_error,
NULL);
- if (reset_pending) {
+ if (reset_pending && locked) {
ret = -EAGAIN;
break;
}
@@ -1705,14 +1709,15 @@ int
i915_wait_request(struct drm_i915_gem_request *req)
{
struct drm_i915_private *dev_priv = req->i915;
- bool interruptible;
+ u32 flags;
int ret;
- interruptible = dev_priv->mm.interruptible;
-
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
- ret = __i915_wait_request(req, interruptible, NULL, NULL);
+ flags = dev_priv->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+ flags |= I915_WAIT_REQUEST_LOCKED;
+
+ ret = __i915_wait_request(req, flags, NULL, NULL);
if (ret)
return ret;
@@ -1824,7 +1829,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
mutex_unlock(&dev->struct_mutex);
ret = 0;
for (i = 0; ret == 0 && i < n; i++)
- ret = __i915_wait_request(requests[i], true, NULL, rps);
+ ret = __i915_wait_request(requests[i],
+ I915_WAIT_REQUEST_INTERRUPTIBLE,
+ NULL, rps);
mutex_lock(&dev->struct_mutex);
for (i = 0; i < n; i++) {
@@ -3442,7 +3449,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
for (i = 0; i < n; i++) {
if (ret == 0)
- ret = __i915_wait_request(req[i], true,
+ ret = __i915_wait_request(req[i], I915_WAIT_REQUEST_INTERRUPTIBLE,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
i915_gem_request_unreference(req[i]);
@@ -3473,8 +3480,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ u32 flags;
+
+ flags = i915->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+ flags |= I915_WAIT_REQUEST_LOCKED;
+
ret = __i915_wait_request(from_req,
- i915->mm.interruptible,
+ flags,
NULL,
&i915->rps.semaphores);
if (ret)
@@ -4476,7 +4488,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;
- ret = __i915_wait_request(target, true, NULL, NULL);
+ ret = __i915_wait_request(target, I915_WAIT_REQUEST_INTERRUPTIBLE, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
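
The reason a locked waiter now bails with -EAGAIN in the wait loop above is that the
reset handler itself needs struct_mutex, so sleeping with the mutex held while a reset
is pending would deadlock. A hypothetical locked caller (variable names illustrative)
backs off and retries using the existing i915_mutex_lock_interruptible() helper:

	ret = __i915_wait_request(req, flags, NULL, NULL);
	if (ret == -EAGAIN) {
		/* A GPU reset is pending and we hold struct_mutex: drop the
		 * lock so the reset can proceed, then retake it and retry. */
		mutex_unlock(&dev->struct_mutex);
		ret = i915_mutex_lock_interruptible(dev);
	}
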
@@ -11546,7 +11546,7 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
if (work->flip_queued_req)
WARN_ON(__i915_wait_request(work->flip_queued_req,
- false, NULL,
+ 0, NULL,
&dev_priv->rps.mmioflips));
/* For framebuffer backed by dmabuf, wait for fence */
@@ -13602,7 +13602,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
continue;
ret = __i915_wait_request(intel_plane_state->wait_req,
- true, NULL, NULL);
+ I915_WAIT_REQUEST_INTERRUPTIBLE,
+ NULL, NULL);
if (ret) {
/* Any hang should be swallowed by the wait */
WARN_ON(ret == -EIO);
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2414,6 +2414,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
int intel_engine_idle(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req;
+ u32 flags;
/* Wait upon the last request to be completed */
if (list_empty(&engine->request_list))
@@ -2423,10 +2424,11 @@ int intel_engine_idle(struct intel_engine_cs *engine)
struct drm_i915_gem_request,
list);
+ flags = req->i915->mm.interruptible ? I915_WAIT_REQUEST_INTERRUPTIBLE : 0;
+ flags |= I915_WAIT_REQUEST_LOCKED;
+
/* Make sure we do not trigger any retires */
- return __i915_wait_request(req,
- req->i915->mm.interruptible,
- NULL, NULL);
+ return __i915_wait_request(req, flags, NULL, NULL);
}
int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
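
The interruptible-plus-LOCKED composition now repeats in i915_wait_request(),
__i915_gem_object_sync() and intel_engine_idle(). A small helper, sketched here as a
suggestion rather than something in the patch, would keep those call sites to one line:

	/* Hypothetical helper: build __i915_wait_request() flags for a wait
	 * performed while holding struct_mutex. */
	static u32 i915_locked_wait_flags(struct drm_i915_private *dev_priv)
	{
		u32 flags = I915_WAIT_REQUEST_LOCKED;

		if (dev_priv->mm.interruptible)
			flags |= I915_WAIT_REQUEST_INTERRUPTIBLE;

		return flags;
	}

intel_engine_idle(), for example, would then reduce to
return __i915_wait_request(req, i915_locked_wait_flags(req->i915), NULL, NULL);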