@@ -449,11 +449,6 @@ int i915_gem_context_init(struct drm_device *dev)
/* NB: RCS will hold a ref for all rings */
ring->default_context = ctx;
-
- /* FIXME: we only want to do this for initialized rings, but for that
- * we first need the new logical ring stuff */
- if (dev_priv->lrc_enabled)
- intel_lr_context_deferred_create(ctx, ring);
}
DRM_DEBUG_DRIVER("%s context support initialized\n",
@@ -88,10 +88,60 @@ bool intel_enable_execlists(struct drm_device *dev)
void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
{
+ if (!intel_ring_initialized(ring))
+ return;
+
+ /* TODO: make sure the ring is stopped */
+ ring->preallocated_lazy_request = NULL;
+ ring->outstanding_lazy_seqno = 0;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
+ i915_cmd_parser_fini_ring(ring);
+
+ if (ring->status_page.obj) {
+ kunmap(sg_page(ring->status_page.obj->pages->sgl));
+ ring->status_page.obj = NULL;
+ }
}
static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
{
+ int ret;
+ struct intel_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *dctx_obj;
+
+ /* Intentionally left blank. */
+ ring->buffer = NULL;
+
+ ring->dev = dev;
+ INIT_LIST_HEAD(&ring->active_list);
+ INIT_LIST_HEAD(&ring->request_list);
+ init_waitqueue_head(&ring->irq_queue);
+
+ ret = intel_lr_context_deferred_create(dctx, ring);
+ if (ret)
+ return ret;
+
+ /* The status page is offset 0 from the context object in LRCs. */
+ dctx_obj = dctx->engine[ring->id].obj;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj);
+ ring->status_page.page_addr = kmap(sg_page(dctx_obj->pages->sgl));
+ if (ring->status_page.page_addr == NULL)
+ return -ENOMEM;
+ ring->status_page.obj = dctx_obj;
+
+ ret = i915_cmd_parser_init_ring(ring);
+ if (ret)
+ return ret;
+
+ if (ring->init) {
+ ret = ring->init(ring);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -370,6 +420,8 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
int ret;
WARN_ON(ctx->render_obj != NULL);
+ if (ctx->engine[ring->id].obj)
+ return 0;
context_size = round_up(get_lr_context_size(ring), 4096);
@@ -40,6 +40,23 @@
*/
#define CACHELINE_BYTES 64
+bool
+intel_ring_initialized(struct intel_engine_cs *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!dev)
+ return false;
+
+ if (intel_enable_execlists(dev)) {
+ struct intel_context *dctx = ring->default_context;
+ struct drm_i915_gem_object *dctx_obj = dctx->engine[ring->id].obj;
+
+ return dctx_obj != NULL;
+ } else
+ return ring->buffer && ring->buffer->obj;
+}
+
static inline int __ring_space(int head, int tail, int size)
{
int space = head - (tail + I915_RING_FREE_SPACE);
@@ -219,11 +219,7 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline bool
-intel_ring_initialized(struct intel_engine_cs *ring)
-{
- return ring->buffer && ring->buffer->obj;
-}
+bool intel_ring_initialized(struct intel_engine_cs *ring);
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)