@@ -840,7 +840,7 @@ struct intel_context {
} legacy_hw_ctx;
/* Execlists */
- struct {
+ struct intel_engine_ctx {
struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf;
int pin_count;
@@ -316,16 +316,16 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
intel_lr_context_descriptor_update(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
+ struct intel_engine_ctx *ectx = &ctx->engine[engine->id];
uint64_t lrca, desc;
- lrca = ctx->engine[engine->id].lrc_vma->node.start +
- LRC_PPHWSP_PN * PAGE_SIZE;
+ lrca = ectx->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
- desc = engine->ctx_desc_template; /* bits 0-11 */
+ desc = engine->ctx_desc_template; /* bits 0-11 */
desc |= lrca; /* bits 12-31 */
desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
- ctx->engine[engine->id].lrc_desc = desc;
+ ectx->lrc_desc = desc;
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
@@ -361,8 +361,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
{
struct intel_engine_cs *engine = rq0->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = rq0->i915;
uint64_t desc[2];
if (rq1) {
@@ -468,11 +467,8 @@ static void execlists_context_unqueue__locked(struct intel_engine_cs *engine)
* resubmit the request. See gen8_emit_request() for where we
* prepare the padding after the end of the request.
*/
- struct intel_ringbuffer *ringbuf;
-
- ringbuf = req0->ctx->engine[engine->id].ringbuf;
req0->tail += 8;
- req0->tail &= ringbuf->size - 1;
+ req0->tail &= req0->ringbuf->size - 1;
}
execlists_submit_requests(req0, req1);
@@ -669,7 +665,8 @@ static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct list_head *vmas)
{
- const unsigned other_rings = ~intel_engine_flag(req->engine);
+ struct intel_engine_cs *engine = req->engine;
+ const unsigned other_rings = ~intel_engine_flag(engine);
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -679,7 +676,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
struct drm_i915_gem_object *obj = vma->obj;
if (obj->active & other_rings) {
- ret = i915_gem_object_sync(obj, req->engine, &req);
+ ret = i915_gem_object_sync(obj, engine, &req);
if (ret)
return ret;
}
@@ -701,9 +698,11 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ctx = request->ctx;
int ret = 0;
- request->ringbuf = request->ctx->engine[request->engine->id].ringbuf;
+ request->ringbuf = ctx->engine[engine->id].ringbuf;
if (i915.enable_guc_submission) {
/*
@@ -718,8 +717,8 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
return ret;
}
- if (request->ctx != request->i915->kernel_context)
- ret = intel_lr_context_pin(request->ctx, request->engine);
+ if (ctx != request->i915->kernel_context)
+ ret = intel_lr_context_pin(ctx, engine);
return ret;
}
@@ -781,6 +780,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
struct intel_ringbuffer *ringbuf = request->ringbuf;
struct drm_i915_private *dev_priv = request->i915;
struct intel_engine_cs *engine = request->engine;
+ struct intel_context *ctx = request->ctx;
intel_logical_ring_advance(ringbuf);
request->tail = ringbuf->tail;
@@ -798,12 +798,12 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
if (intel_engine_stopped(engine))
return 0;
- if (engine->last_context != request->ctx) {
+ if (engine->last_context != ctx) {
if (engine->last_context)
intel_lr_context_unpin(engine->last_context, engine);
- if (request->ctx != request->i915->kernel_context) {
- intel_lr_context_pin(request->ctx, engine);
- engine->last_context = request->ctx;
+ if (ctx != dev_priv->kernel_context) {
+ intel_lr_context_pin(ctx, engine);
+ engine->last_context = ctx;
} else {
engine->last_context = NULL;
}
@@ -948,9 +948,8 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas)
{
- struct drm_device *dev = params->dev;
struct intel_engine_cs *engine = params->engine;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(params->dev);
struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
u64 exec_start;
int instp_mode;
@@ -1024,6 +1023,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
void intel_execlists_retire_requests(struct intel_engine_cs *engine)
{
+ struct intel_context *dctx = to_i915(engine->dev)->kernel_context;
struct drm_i915_gem_request *req, *tmp;
struct list_head retired_list;
@@ -1038,10 +1038,8 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
struct intel_context *ctx = req->ctx;
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
- if (ctx_obj && (ctx != req->i915->kernel_context))
+ if (ctx != dctx && ctx->engine[engine->id].state)
intel_lr_context_unpin(ctx, engine);
list_del(&req->execlist_link);
@@ -1091,17 +1089,18 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
+ struct drm_i915_private *dev_priv = ctx->i915;
+ struct intel_engine_ctx *ectx = &ctx->engine[engine->id];
+ struct drm_i915_gem_object *ctx_obj = ectx->state;
+ struct intel_ringbuffer *ringbuf = ectx->ringbuf;
struct page *lrc_state_page;
uint32_t *lrc_reg_state;
int ret;
- WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
- PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+ PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret)
return ret;
@@ -1111,15 +1110,15 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
goto unpin_ctx_obj;
}
- ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf);
+ ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret)
goto unpin_ctx_obj;
- ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+ ectx->lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
intel_lr_context_descriptor_update(ctx, engine);
lrc_reg_state = kmap(lrc_state_page);
lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
- ctx->engine[engine->id].lrc_reg_state = lrc_reg_state;
+ ectx->lrc_reg_state = lrc_reg_state;
ctx_obj->dirty = true;
/* Invalidate GuC TLB. */
@@ -1137,9 +1136,10 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
static int intel_lr_context_pin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
+ struct intel_engine_ctx *ectx = &ctx->engine[engine->id];
int ret = 0;
- if (ctx->engine[engine->id].pin_count++ == 0) {
+ if (ectx->pin_count++ == 0) {
ret = intel_lr_context_do_pin(ctx, engine);
if (ret)
goto reset_pin_count;
@@ -1149,23 +1149,24 @@ static int intel_lr_context_pin(struct intel_context *ctx,
return ret;
reset_pin_count:
- ctx->engine[engine->id].pin_count = 0;
+ ectx->pin_count = 0;
return ret;
}
void intel_lr_context_unpin(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
- struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
+ struct intel_engine_ctx *ectx = &ctx->engine[engine->id];
WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
- if (--ctx->engine[engine->id].pin_count == 0) {
- kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
- intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
- i915_gem_object_ggtt_unpin(ctx_obj);
- ctx->engine[engine->id].lrc_vma = NULL;
- ctx->engine[engine->id].lrc_desc = 0;
- ctx->engine[engine->id].lrc_reg_state = NULL;
+
+ if (--ectx->pin_count == 0) {
+ kunmap(kmap_to_page(ectx->lrc_reg_state));
+ intel_unpin_ringbuffer_obj(ectx->ringbuf);
+ i915_gem_object_ggtt_unpin(ectx->state);
+ ectx->lrc_reg_state = NULL;
+ ectx->lrc_vma = NULL;
+ ectx->lrc_desc = 0;
i915_gem_context_unreference(ctx);
}
@@ -1176,8 +1177,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
int ret, i;
struct intel_engine_cs *engine = req->engine;
struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = req->i915;
struct i915_workarounds *w = &dev_priv->workarounds;
if (w->count == 0)
@@ -1563,12 +1563,11 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct intel_engine_ctx *ectx = &dev_priv->kernel_context->engine[engine->id];
unsigned int next_context_status_buffer_hw;
- lrc_setup_hardware_status_page(engine,
- dev_priv->kernel_context->engine[engine->id].state);
+ lrc_setup_hardware_status_page(engine, ectx->state);
I915_WRITE_IMR(engine,
~(engine->irq_enable_mask | engine->irq_keep_mask));
@@ -2512,8 +2511,9 @@ void intel_lr_context_free(struct intel_context *ctx)
int i;
for (i = I915_NUM_ENGINES; --i >= 0; ) {
- struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
- struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
+ struct intel_engine_ctx *ectx = &ctx->engine[i];
+ struct intel_ringbuffer *ringbuf = ectx->ringbuf;
+ struct drm_i915_gem_object *ctx_obj = ectx->state;
if (!ctx_obj)
continue;
@@ -2523,7 +2523,7 @@ void intel_lr_context_free(struct intel_context *ctx)
i915_gem_object_ggtt_unpin(ctx_obj);
}
- WARN_ON(ctx->engine[i].pin_count);
+ WARN_ON(ectx->pin_count);
intel_ringbuffer_free(ringbuf);
drm_gem_object_unreference(&ctx_obj->base);
}
@@ -2603,13 +2603,14 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *engine)
{
struct drm_device *dev = engine->dev;
+ struct intel_engine_ctx *ectx = &ctx->engine[engine->id];
struct drm_i915_gem_object *ctx_obj;
uint32_t context_size;
struct intel_ringbuffer *ringbuf;
int ret;
WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
- WARN_ON(ctx->engine[engine->id].state);
+ WARN_ON(ectx->state);
context_size = round_up(intel_lr_context_size(engine), 4096);
@@ -2634,8 +2635,8 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
goto error_ringbuf;
}
- ctx->engine[engine->id].ringbuf = ringbuf;
- ctx->engine[engine->id].state = ctx_obj;
+ ectx->ringbuf = ringbuf;
+ ectx->state = ctx_obj;
if (ctx != ctx->i915->kernel_context && engine->init_context) {
struct drm_i915_gem_request *req;
@@ -2662,23 +2663,22 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
intel_ringbuffer_free(ringbuf);
error_deref_obj:
drm_gem_object_unreference(&ctx_obj->base);
- ctx->engine[engine->id].ringbuf = NULL;
- ctx->engine[engine->id].state = NULL;
+ ectx->ringbuf = NULL;
+ ectx->state = NULL;
return ret;
}
void intel_lr_context_reset(struct drm_device *dev,
- struct intel_context *ctx)
+ struct intel_context *ctx)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int i;
for_each_engine(engine, dev_priv, i) {
- struct drm_i915_gem_object *ctx_obj =
- ctx->engine[engine->id].state;
- struct intel_ringbuffer *ringbuf =
- ctx->engine[engine->id].ringbuf;
+ struct intel_engine_ctx *ectx = &ctx->engine[engine->id];
+ struct drm_i915_gem_object *ctx_obj = ectx->state;
+ struct intel_ringbuffer *ringbuf = ectx->ringbuf;
uint32_t *reg_state;
struct page *page;