@@ -92,97 +92,24 @@ void gen8_gem_context_free(struct i915_hw_context *ctx)
}
}
-struct i915_hw_context *
-gen8_gem_create_context(struct drm_device *dev,
- struct intel_engine *ring,
- struct drm_i915_file_private *file_priv,
- bool create_vm)
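+/*
+ * Fill in the logical ring context image for @ctx. The context object's
+ * backing page holds (MMIO offset, value) pairs that the hardware restores
+ * on a context switch: ring head, tail, start and control, plus PPGTT state.
+ */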
+static int
+intel_populate_lrc(struct i915_hw_context *ctx,
+ struct intel_engine *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_hw_context *ctx = NULL;
- struct drm_i915_gem_object *ring_obj = NULL;
- struct i915_hw_ppgtt *ppgtt = NULL;
- struct intel_ringbuffer *ringbuf = NULL;
struct page *page;
uint32_t *reg_state;
+ struct i915_hw_ppgtt *ppgtt;
int ret;
- ctx = i915_gem_create_context(dev, file_priv, create_vm);
- if (IS_ERR_OR_NULL(ctx))
- return ctx;
-
- if (file_priv) {
- ret = i915_gem_obj_ggtt_pin(ctx->obj, GEN8_CONTEXT_ALIGN, 0);
- if (ret) {
- DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
- i915_gem_context_unreference(ctx);
- return ERR_PTR(ret);
- }
- }
-
- ring_obj = i915_gem_alloc_object(dev, 32 * PAGE_SIZE);
- if (!ring_obj) {
- i915_gem_object_ggtt_unpin(ctx->obj);
- i915_gem_context_unreference(ctx);
- return ERR_PTR(-ENOMEM);
- }
-
- /* TODO: For now we put this in the mappable region so that we can reuse
- * the existing ringbuffer code which ioremaps it. When we start
- * creating many contexts, this will no longer work and we must switch
- * to a kmapish interface.
- */
- ret = i915_gem_obj_ggtt_pin(ring_obj, PAGE_SIZE, PIN_MAPPABLE);
- if (ret) {
- drm_gem_object_unreference(&ring_obj->base);
- i915_gem_object_ggtt_unpin(ctx->obj);
- i915_gem_context_unreference(ctx);
- return ERR_PTR(ret);
- }
-
- /* Failure at this point is almost impossible */
- ret = i915_gem_object_set_to_gtt_domain(ring_obj, true);
- if (ret)
- goto destroy_ring_obj;
-
- if (file_priv) {
- ringbuf = kzalloc(sizeof(struct intel_ringbuffer), GFP_KERNEL);
- if (!ringbuf) {
- DRM_ERROR("Failed to allocate ringbuffer\n");
- ret = -ENOMEM;
- goto destroy_ring_obj;
- }
-
- ringbuf->size = 32 * PAGE_SIZE;
- ringbuf->effective_size = ringbuf->size;
- ringbuf->head = 0;
- ringbuf->tail = 0;
- ringbuf->space = ringbuf->size;
- ringbuf->last_retired_head = -1;
- ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
- i915_gem_obj_ggtt_offset(ring_obj),
- ringbuf->size);
- if (ringbuf->virtual_start == NULL) {
- DRM_ERROR("Failed to map ringbuffer\n");
- ret = -EINVAL;
- goto destroy_ringbuf;
- }
-
- ctx->ringbuf = ringbuf;
- } else {
- ctx->ringbuf = &ring->default_ringbuf;
- }
- ctx->ringbuf->obj = ring_obj;
-
ppgtt = ctx_to_ppgtt(ctx);
ret = i915_gem_object_set_to_cpu_domain(ctx->obj, true);
if (ret)
- goto unmap_ringbuf;
+ return ret;
ret = i915_gem_object_get_pages(ctx->obj);
if (ret)
- goto unmap_ringbuf;
+ return ret;
i915_gem_object_pin_pages(ctx->obj);
@@ -204,7 +131,7 @@ gen8_gem_create_context(struct drm_device *dev,
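+ /* Each context image entry is an (MMIO offset, value) pair */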
reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
reg_state[CTX_RING_TAIL+1] = 0;
reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
- reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
+ reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ctx->ringbuf->obj);
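+ /* RING_CTL length field encodes the buffer size minus one page (32 pages here) */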
reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
reg_state[CTX_RING_BUFFER_CONTROL+1] = (31 * PAGE_SIZE) | RING_VALID;
reg_state[CTX_BB_HEAD_U] = ring->mmio_base + 0x168;
@@ -265,6 +192,94 @@ gen8_gem_create_context(struct drm_device *dev,
set_page_dirty(page);
i915_gem_object_unpin_pages(ctx->obj);
+ return 0;
+}
+
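+/*
+ * Create a context and its 32-page ring buffer object, pin both into the
+ * GGTT, and, when an engine is given, populate the logical ring context
+ * register state via intel_populate_lrc().
+ */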
+struct i915_hw_context *
+gen8_gem_create_context(struct drm_device *dev,
+ struct intel_engine *ring,
+ struct drm_i915_file_private *file_priv,
+ bool create_vm)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_hw_context *ctx = NULL;
+ struct drm_i915_gem_object *ring_obj = NULL;
+ struct intel_ringbuffer *ringbuf = NULL;
+ int ret;
+
+ ctx = i915_gem_create_context(dev, file_priv, create_vm);
+ if (IS_ERR_OR_NULL(ctx))
+ return ctx;
+
+ if (file_priv) {
+ ret = i915_gem_obj_ggtt_pin(ctx->obj, GEN8_CONTEXT_ALIGN, 0);
+ if (ret) {
+ DRM_DEBUG_DRIVER("Couldn't pin context object: %d\n", ret);
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(ret);
+ }
+ }
+
+ ring_obj = i915_gem_alloc_object(dev, 32 * PAGE_SIZE);
+ if (!ring_obj) {
+ i915_gem_object_ggtt_unpin(ctx->obj);
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* TODO: For now we put this in the mappable region so that we can reuse
+ * the existing ringbuffer code which ioremaps it. When we start
+ * creating many contexts, this will no longer work and we must switch
+ * to a kmapish interface.
+ */
+ ret = i915_gem_obj_ggtt_pin(ring_obj, PAGE_SIZE, PIN_MAPPABLE);
+ if (ret) {
+ drm_gem_object_unreference(&ring_obj->base);
+ i915_gem_object_ggtt_unpin(ctx->obj);
+ i915_gem_context_unreference(ctx);
+ return ERR_PTR(ret);
+ }
+
+ /* Failure at this point is almost impossible */
+ ret = i915_gem_object_set_to_gtt_domain(ring_obj, true);
+ if (ret)
+ goto destroy_ring_obj;
+
+ if (file_priv) {
+ ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
+ if (!ringbuf) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto destroy_ring_obj;
+ }
+
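+ /* Start with an empty ring: head == tail, all space free, nothing retired */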
+ ringbuf->size = 32 * PAGE_SIZE;
+ ringbuf->effective_size = ringbuf->size;
+ ringbuf->head = 0;
+ ringbuf->tail = 0;
+ ringbuf->space = ringbuf->size;
+ ringbuf->last_retired_head = -1;
+ ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset(ring_obj),
+ ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
+ DRM_ERROR("Failed to map ringbuffer\n");
+ ret = -EINVAL;
+ goto destroy_ringbuf;
+ }
+
+ ctx->ringbuf = ringbuf;
+ } else {
+ ctx->ringbuf = &ring->default_ringbuf;
+ }
+ ctx->ringbuf->obj = ring_obj;
+
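+ /* Build the register state the hardware will load for this engine */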
+ if (ring) {
+ ret = intel_populate_lrc(ctx, ring);
+ if (ret)
+ goto unmap_ringbuf;
+ }
+
return ctx;
unmap_ringbuf: