@@ -1452,6 +1452,8 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
		 */
		for_each_ring(ring, dev_priv, i)
			wake_up_all(&ring->irq_queue);
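+		/* Pending page flips will never finish either; complete them now */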
+		intel_atomic_wedged(dev);
	}

	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -2250,6 +2250,36 @@ void intel_atomic_notify_ring(struct drm_device *dev,
	spin_unlock_irqrestore(&dev_priv->flip.lock, flags);
}

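+/*
+ * The GPU is wedged: outstanding rendering will never complete, so drop
+ * the ring irq references held by the queued flips and kick the flip
+ * work to finish them all immediately.
+ */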
+void intel_atomic_wedged(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_flip *intel_flip;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_priv->flip.lock, flags);
+
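+	/* Outstanding rendering will never finish, so stop waiting for it */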
+	list_for_each_entry(intel_flip, &dev_priv->flip.list, base.list) {
+		struct intel_ring_buffer *ring = intel_flip->ring;
+
+		if (ring) {
+			intel_flip->ring = NULL;
+			ring->irq_put(ring);
+		}
+	}
+
+	/* all flips are "ready" so no need to check with intel_atomic_flips_ready() */
+	if (!list_empty(&dev_priv->flip.list))
+		queue_work(dev_priv->flip.wq, &dev_priv->flip.work);
+
+	spin_unlock_irqrestore(&dev_priv->flip.lock, flags);
+}
+
static void atomic_pipe_commit(struct drm_device *dev,
			       struct intel_atomic_state *state,
			       int pipe)
@@ -2427,6 +2457,12 @@ static void atomic_pipe_commit(struct drm_device *dev,
	if (rings_mask == 0)
		return;

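+	/* The GPU is hung, so the rings will never complete these flips */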
+	if (atomic_read(&dev_priv->mm.wedged)) {
+		intel_atomic_wedged(dev);
+		return;
+	}
+
	/*
	 * Double check to catch cases where the irq
	 * fired before the flip was placed onto flip.list.
@@ -723,6 +723,7 @@ extern void intel_atomic_free_events(struct drm_device *dev, struct drm_file *fi
extern void intel_atomic_handle_vblank(struct drm_device *dev, int pipe);
extern void intel_atomic_clear_flips(struct drm_crtc *crtc);
extern void intel_atomic_notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring);
+extern void intel_atomic_wedged(struct drm_device *dev);
extern void intel_enable_primary(struct drm_crtc *crtc);
extern void intel_disable_primary(struct drm_crtc *crtc);