@@ -123,28 +123,34 @@ static int context_init(struct drm_i915_gem_context *ctx,
*/
last = ring->context_switch(ring, ctx, 0,
I915_CONTEXT_SAVE_ONLY);
- if (!last) {
- ret = EIO;
+ if (last != ctx) {
+ DRM_ERROR("Initial context save failed\n");
+ ret = -EIO;
goto err_out;
}
last = ring->context_switch(ring, ctx, 0,
I915_CONTEXT_SAVE_ONLY |
I915_CONTEXT_FORCED_SWITCH);
- if (!last) {
- ret = EIO;
+ if (last != ctx) {
+ DRM_ERROR("First context switch failed\n");
+ ret = -EIO;
goto err_out;
}
last = ring->context_switch(ring, ctx, 0,
I915_CONTEXT_NORMAL_SWITCH |
I915_CONTEXT_FORCED_SWITCH);
+ if (last != ctx) {
+ DRM_ERROR("Final context switch failed\n");
+ ret = -EIO;
+ goto err_out;
+ }
} else {
last = ring->context_switch(ring, ctx, 0,
I915_CONTEXT_SAVE_ONLY);
- }
-
- if (!last) {
- ret = EIO;
- goto err_out;
+ if (!last) {
+ ret = -EIO;
+ goto err_out;
+ }
}
if (!last->is_default)
@@ -346,7 +352,11 @@ void i915_context_load(struct drm_device *dev)
ret = logical_context_alloc(dev, NULL, 0, &dev_priv->default_context);
if (ret)
dev_priv->contexts_disabled = true;
+ else {
+ DRM_DEBUG_DRIVER("HW context support initialized\n");
+ }
+
 mutex_unlock(&dev->struct_mutex);
}
void i915_context_unload(struct drm_device *dev)
@@ -909,6 +909,7 @@ struct drm_i915_gem_context {
uint8_t ring_enable;
struct drm_i915_gem_object *obj;
bool is_default;
+ bool is_initialized;
struct drm_gem_object **bufs;
int slot_count;
@@ -734,6 +734,102 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
return 0;
}
+static int do_ring_switch(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_context *new_context,
+ u32 hw_flags)
+{
+ struct drm_device *dev = ring->dev;
+ int ret = 0;
+
+ if (!new_context->is_initialized) {
+ ret = intel_ring_begin(ring, 6);
+ if (ret)
+ return ret;
+ intel_ring_emit(ring, MI_NOOP | (1 << 22) | new_context->id);
+ intel_ring_emit(ring, MI_FLUSH);
+ } else {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+ }
+
+ DRM_DEBUG_DRIVER("Context switch %d -> %d\n",
+ ring->last_context ? ring->last_context->id : -1,
+ new_context->id);
+
+ if (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_emit(ring, MI_SET_CONTEXT);
+ intel_ring_emit(ring, new_context->obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ hw_flags);
+
+ /* TODO: we may need a NOOP after MI_SUSPEND_FLUSH here */
+ if (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
+ intel_ring_emit(ring, MI_SUSPEND_FLUSH);
+ else
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return ret;
+
+}
+
+static struct drm_i915_gem_context *
+render_ring_context_switch(struct intel_ring_buffer *ring,
+ struct drm_i915_gem_context *new_context,
+ u32 seqno, u32 flags)
+{
+ struct drm_device *dev = ring->dev;
+ bool force = (flags & I915_CONTEXT_FORCED_SWITCH) ? true : false;
+ struct drm_i915_gem_context *last = NULL;
+ uint32_t hw_flags = 0;
+
+ if (WARN_ON(new_context->obj->gtt_offset == 0))
+ return NULL;
+
+ /* last_context is only protected by struct_mutex */
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ if (!force && (ring->last_context == new_context)) {
+ if (WARN_ON(seqno == 0))
+ seqno = ring->get_seqno(ring);
+
+ if (!new_context->is_default)
+ i915_gem_object_move_to_active(new_context->obj,
+ ring, seqno);
+ return new_context;
+ }
+
+ if (flags & I915_CONTEXT_SAVE_ONLY)
+ hw_flags = MI_RESTORE_INHIBIT;
+
+ if (do_ring_switch(ring, new_context, hw_flags))
+ return NULL;
+
+ last = ring->last_context;
+ ring->last_context = new_context;
+
+ if (last && !last->is_default)
+ i915_gem_object_move_to_active(last->obj, ring, seqno);
+
+ /* XXX On the very first switch (default context) last is NULL, but NULL
+ * is also our only failure signal, so return new_context instead. */
+ if (!new_context->is_initialized && !last) {
+ WARN_ON(!new_context->is_default);
+ last = new_context;
+ } else
+ new_context->is_initialized = true;
+
+ return last;
+}
+
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1008,6 +1104,7 @@ static const struct intel_ring_buffer render_ring = {
.irq_put = render_ring_put_irq,
.dispatch_execbuffer = render_ring_dispatch_execbuffer,
.cleanup = render_ring_cleanup,
+ .context_switch = render_ring_context_switch,
};
/* ring buffer for bit-stream decoder */