@@ -2434,6 +2434,16 @@ int gen8_create_lr_context(struct i915_hw_context *ctx,
struct i915_hw_context *
gen8_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine *ring, const u32 ctx_id);
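+/* Derive a globally unique HW context ID from a context's GGTT address */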
+static inline u32 intel_get_lr_contextid(struct drm_i915_gem_object *ctx_obj)
+{
+ u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj);
+
+ /* The LRCA is required to be 4K aligned, and the LRC context image is
+ * always at least 2 pages, so the 19 most significant bits of the LRCA
+ * are globally unique (which leaves one HwCtxId bit free) */
+ return lrca >> 13;
+}
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -47,6 +47,7 @@
#define GEN8_LR_CONTEXT_ALIGN 4096
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
+#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define CTX_LRI_HEADER_0 0x01
@@ -78,6 +79,116 @@
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
+#define GEN8_CTX_VALID (1<<0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+#define GEN8_CTX_FORCE_RESTORE (1<<2)
+#define GEN8_CTX_L3LLC_COHERENT (1<<5)
+#define GEN8_CTX_PRIVILEGE (1<<8)
+enum {
+ ADVANCED_CONTEXT = 0,
+ LEGACY_CONTEXT,
+ ADVANCED_AD_CONTEXT,
+ LEGACY_64B_CONTEXT
+};
+#define GEN8_CTX_MODE_SHIFT 3
+enum {
+ FAULT_AND_HANG = 0,
+ FAULT_AND_HALT, /* Debug only */
+ FAULT_AND_STREAM,
+ FAULT_AND_CONTINUE /* Unsupported */
+};
+#define GEN8_CTX_FAULT_SHIFT 6
+#define GEN8_CTX_LRCA_SHIFT 12
+#define GEN8_CTX_UNUSED_SHIFT 32
+
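+/*
+ * Build a Gen8 context descriptor for one ELSP element, laid out per the
+ * defines above:
+ *   bit 0      : context valid
+ *   bits 3-4   : addressing mode (LEGACY_CONTEXT here)
+ *   bits 6-7   : fault handling mode
+ *   bits 12-31 : LRCA, the 4K-aligned GGTT offset of the context image
+ *   bits 32+   : HW context ID (see intel_get_lr_contextid)
+ */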
+static inline uint64_t get_descriptor(struct drm_i915_gem_object *ctx_obj)
+{
+ uint64_t desc;
+
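+ /* The GGTT offset must fit in 32 bits: the descriptor has no room
+ * for a wider LRCA */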
+ BUG_ON(i915_gem_obj_ggtt_offset(ctx_obj) & 0xFFFFFFFF00000000ULL);
+
+ desc = GEN8_CTX_VALID;
+ desc |= LEGACY_CONTEXT << GEN8_CTX_MODE_SHIFT;
+ desc |= i915_gem_obj_ggtt_offset(ctx_obj);
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+ desc |= (u64)intel_get_lr_contextid(ctx_obj) << GEN8_CTX_UNUSED_SHIFT;
+ desc |= GEN8_CTX_PRIVILEGE;
+
+ /* TODO: WaDisableLiteRestore when we start using semaphore
+ * signalling between Command Streamers */
+ /* desc |= GEN8_CTX_FORCE_RESTORE; */
+
+ return desc;
+}
+
+static void submit_execlist(struct intel_engine *ring,
+ struct drm_i915_gem_object *ctx_obj0,
+ struct drm_i915_gem_object *ctx_obj1)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ uint64_t temp;
+ uint32_t desc[4];
+
+ /* XXX: You must always write both descriptors in the order below. */
+ if (ctx_obj1)
+ temp = get_descriptor(ctx_obj1);
+ else
+ temp = 0;
+ desc[1] = (u32)(temp >> 32);
+ desc[0] = (u32)temp;
+
+ temp = get_descriptor(ctx_obj0);
+ desc[3] = (u32)(temp >> 32);
+ desc[2] = (u32)temp;
+
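+ /* Element 1 first (upper dword, then lower dword), element 0 last */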
+ I915_WRITE(RING_ELSP(ring), desc[1]);
+ I915_WRITE(RING_ELSP(ring), desc[0]);
+ I915_WRITE(RING_ELSP(ring), desc[3]);
+ /* The context is automatically loaded after the following write */
+ I915_WRITE(RING_ELSP(ring), desc[2]);
+
+ /* ELSP is a write-only register, so this serves as a posting read */
+ POSTING_READ(RING_EXECLIST_STATUS(ring));
+}
+
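+/*
+ * Submit up to two contexts to the ELSP. Both context objects must already
+ * be pinned in the GGTT, since their GGTT offsets become the hardware LRCAs.
+ */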
+static int gen8_switch_context(struct intel_engine *ring,
+ struct i915_hw_context *to0, u32 tail0,
+ struct i915_hw_context *to1, u32 tail1)
+{
+ struct drm_i915_gem_object *ctx_obj0;
+ struct drm_i915_gem_object *ctx_obj1 = NULL;
+
+ ctx_obj0 = to0->engine[ring->id].obj;
+ BUG_ON(!ctx_obj0);
+ BUG_ON(!i915_gem_obj_is_pinned(ctx_obj0));
+
+ if (to1) {
+ ctx_obj1 = to1->engine[ring->id].obj;
+ BUG_ON(!ctx_obj1);
+ BUG_ON(!i915_gem_obj_is_pinned(ctx_obj1));
+ }
+
+ submit_execlist(ring, ctx_obj0, ctx_obj1);
+
+ return 0;
+}
+
struct i915_hw_context *
gen8_gem_validate_context(struct drm_device *dev, struct drm_file *file,
struct intel_engine *ring, const u32 ctx_id)