@@ -1099,8 +1099,12 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
- /* For gpu hang simulation. */
- unsigned int stop_rings;
+ /* For gpu hang simulation.
+  * The MSB suppresses the DRM_ERROR printed when a simulated hang
+  * bans the default context.
+  */
+ u32 stop_rings;
+
+#define I915_SQUELCH_CTX_BAN_ERROR (1 << 31)
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
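
For reference, a minimal sketch (not part of the patch) of how a caller might pack a per-ring stop mask together with the new squelch bit before storing it in stop_rings; pack_stop_rings() is a hypothetical helper, and the meaning of the low bits is assumed from the existing stop_rings usage:

static u32 pack_stop_rings(u32 ring_mask, bool squelch_ban_error)
{
	/* Low bits pick the rings to hold back; keep the MSB clear so a
	 * caller cannot set the squelch flag by accident via the mask.
	 */
	u32 val = ring_mask & ~I915_SQUELCH_CTX_BAN_ERROR;

	if (squelch_ban_error)
		val |= I915_SQUELCH_CTX_BAN_ERROR;

	return val;
}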
@@ -2146,6 +2150,16 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
+static inline u32 i915_stopped_rings(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gpu_error.stop_rings & ~I915_SQUELCH_CTX_BAN_ERROR;
+}
+
+static inline bool i915_squelch_ban_error(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gpu_error.stop_rings & I915_SQUELCH_CTX_BAN_ERROR;
+}
+
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
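
The raw stop_rings word is stored unmasked, so readers go through the two helpers above. As a hedged sketch of the kind of debugfs setter that would feed it, modelled loosely on the existing i915_ring_stop hook; the function name and body here are an approximation, not the driver's actual implementation:

static int example_ring_stop_set(void *data, u64 val)
{
	struct drm_device *dev = data;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Store the ring mask and squelch bit as-is; i915_stopped_rings()
	 * and i915_squelch_ban_error() split them apart on the read side.
	 */
	dev_priv->gpu_error.stop_rings = (u32)val;

	return 0;
}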
@@ -2277,8 +2277,9 @@ static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
if (!i915_gem_context_is_default(ctx)) {
DRM_DEBUG("context hanging too fast, banning!\n");
return true;
- } else if (dev_priv->gpu_error.stop_rings == 0) {
- DRM_ERROR("gpu hanging too fast, banning!\n");
+ } else if (!i915_stopped_rings(dev_priv)) {
+ if (!i915_squelch_ban_error(dev_priv))
+ DRM_ERROR("gpu hanging too fast, banning!\n");
return true;
}
}
@@ -46,7 +46,7 @@ void __intel_ring_advance(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = ring->dev->dev_private;
ring->tail &= ring->size - 1;
- if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
+ if (i915_stopped_rings(dev_priv) & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
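
Finally, a hedged userspace-side sketch of how a test might exercise the new bit by writing a combined value to the i915_ring_stop debugfs file; the debugfs path, the hex formatting, and the use of bit 0 as the ring flag are assumptions for illustration, not taken from the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed layout: bit 0 stops one ring, bit 31 squelches the
	 * "gpu hanging too fast" DRM_ERROR on the resulting ban.
	 */
	const unsigned int val = (1u << 0) | (1u << 31);
	char buf[32];
	int fd = open("/sys/kernel/debug/dri/0/i915_ring_stop", O_WRONLY);

	if (fd < 0)
		return 1;

	snprintf(buf, sizeof(buf), "0x%x", val);
	if (write(fd, buf, strlen(buf)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}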