@@ -2322,6 +2322,7 @@ void gen8_gem_context_fini(struct drm_device *dev);
struct i915_hw_context *gen8_gem_create_context(struct drm_device *dev,
struct intel_engine *ring,
struct drm_i915_file_private *file_priv, bool create_vm);
+void gen8_gem_context_free(struct i915_hw_context *ctx);
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -75,15 +75,31 @@
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
+void gen8_gem_context_free(struct i915_hw_context *ctx)
+{
+	/* Global default contexts' ringbuffers are taken care of
+	 * in the fini cleanup code */
+ if (ctx->file_priv) {
+ iounmap(ctx->ringbuf->virtual_start);
+ i915_gem_object_ggtt_unpin(ctx->ringbuf->obj);
+ drm_gem_object_unreference(&ctx->ringbuf->obj->base);
+ ctx->ringbuf->obj = NULL;
+ kfree(ctx->ringbuf);
+ ctx->ringbuf = NULL;
+ }
+}
+
struct i915_hw_context *
gen8_gem_create_context(struct drm_device *dev,
struct intel_engine *ring,
struct drm_i915_file_private *file_priv,
bool create_vm)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx = NULL;
struct drm_i915_gem_object *ring_obj = NULL;
struct i915_hw_ppgtt *ppgtt = NULL;
+ struct intel_ringbuffer *ringbuf = NULL;
struct page *page;
uint32_t *reg_state;
int ret;
@@ -94,7 +110,8 @@ gen8_gem_create_context(struct drm_device *dev,
ring_obj = i915_gem_alloc_object(dev, 32 * PAGE_SIZE);
if (!ring_obj) {
- i915_gem_object_ggtt_unpin(ctx->obj);
+ if (!file_priv)
+ i915_gem_object_ggtt_unpin(ctx->obj);
i915_gem_context_unreference(ctx);
return ERR_PTR(-ENOMEM);
}
@@ -107,7 +124,8 @@ gen8_gem_create_context(struct drm_device *dev,
ret = i915_gem_obj_ggtt_pin(ring_obj, PAGE_SIZE, PIN_MAPPABLE);
if (ret) {
drm_gem_object_unreference(&ring_obj->base);
- i915_gem_object_ggtt_unpin(ctx->obj);
+ if (!file_priv)
+ i915_gem_object_ggtt_unpin(ctx->obj);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);
}
@@ -117,18 +135,44 @@ gen8_gem_create_context(struct drm_device *dev,
if (ret)
goto destroy_ring_obj;
- ctx->ringbuf = &ring->default_ringbuf;
+ if (file_priv) {
+ ringbuf = kzalloc(sizeof(struct intel_ringbuffer), GFP_KERNEL);
+ if (!ringbuf) {
+ DRM_ERROR("Failed to allocate ringbuffer\n");
+ ret = -ENOMEM;
+ goto destroy_ring_obj;
+ }
+
+ ringbuf->size = 32 * PAGE_SIZE;
+ ringbuf->effective_size = ringbuf->size;
+ ringbuf->head = 0;
+ ringbuf->tail = 0;
+ ringbuf->space = ringbuf->size;
+ ringbuf->last_retired_head = -1;
+ ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset(ring_obj),
+ ringbuf->size);
+ if (ringbuf->virtual_start == NULL) {
+ DRM_ERROR("Failed to map ringbuffer\n");
+ ret = -EINVAL;
+ goto destroy_ringbuf;
+ }
+
+ ctx->ringbuf = ringbuf;
+ } else {
+ ctx->ringbuf = &ring->default_ringbuf;
+ }
ctx->ringbuf->obj = ring_obj;
ppgtt = ctx_to_ppgtt(ctx);
ret = i915_gem_object_set_to_cpu_domain(ctx->obj, true);
if (ret)
- goto destroy_ring_obj;
+ goto unmap_ringbuf;
ret = i915_gem_object_get_pages(ctx->obj);
if (ret)
- goto destroy_ring_obj;
+ goto unmap_ringbuf;
i915_gem_object_pin_pages(ctx->obj);
@@ -213,12 +257,27 @@ gen8_gem_create_context(struct drm_device *dev,
 	return ctx;
+unmap_ringbuf:
+	if (ringbuf)
+		iounmap(ringbuf->virtual_start);
+
+destroy_ringbuf:
+	if (ringbuf) {
+		ringbuf->obj = NULL;
+		if (ctx->ringbuf == ringbuf)
+			ctx->ringbuf = NULL;
+		kfree(ringbuf);
+	}
+
 destroy_ring_obj:
 	i915_gem_object_ggtt_unpin(ring_obj);
 	drm_gem_object_unreference(&ring_obj->base);
-	ctx->ringbuf->obj = NULL;
-	ctx->ringbuf = NULL;
+	if (ctx->ringbuf) {
+		ctx->ringbuf->obj = NULL;
+		ctx->ringbuf = NULL;
+	}
+	if (!file_priv)
+		i915_gem_object_ggtt_unpin(ctx->obj);
i915_gem_context_unreference(ctx);
return ERR_PTR(ret);