@@ -469,7 +469,10 @@ enum i915_map_type i915_coherent_map_type(struct drm_i915_private *i915,
struct drm_i915_gem_object *obj,
bool always_coherent)
{
- if (i915_gem_object_is_lmem(obj))
+ /*
+ * Wa_22016122933: always return I915_MAP_WC for MTL
+ */
+ if (i915_gem_object_is_lmem(obj) || IS_METEORLAKE(i915))
return I915_MAP_WC;
if (HAS_LLC(i915) || always_coherent)
return I915_MAP_WB;
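
For readers skimming the patch, the resulting decision tree is small enough to model in isolation. Below is a standalone sketch of the logic after this hunk; the types and field names are illustrative stand-ins, not the driver's:

    /*
     * Standalone model of i915_coherent_map_type() after this patch.
     * dev_caps/gem_obj are hypothetical stand-ins for the driver types.
     */
    #include <stdbool.h>
    #include <stdio.h>

    enum map_type { MAP_WB, MAP_WC };

    struct dev_caps { bool is_mtl; bool has_llc; };
    struct gem_obj  { bool is_lmem; };

    static enum map_type coherent_map_type(const struct dev_caps *d,
                                           const struct gem_obj *o,
                                           bool always_coherent)
    {
            /* Wa_22016122933: MTL always takes the WC path */
            if (o->is_lmem || d->is_mtl)
                    return MAP_WC;
            if (d->has_llc || always_coherent)
                    return MAP_WB;
            return MAP_WC;
    }

    int main(void)
    {
            struct dev_caps mtl = { .is_mtl = true, .has_llc = false };
            struct gem_obj smem = { .is_lmem = false };

            /* on MTL even a system-memory object now maps as WC */
            printf("%s\n", coherent_map_type(&mtl, &smem, true) == MAP_WC
                   ? "WC" : "WB");
            return 0;
    }
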
@@ -110,6 +110,13 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
if (obj->base.size < gsc->fw.size)
return -ENOSPC;
+ /*
+ * Wa_22016122933: For MTL the shared memory needs to be mapped
+ * as WC on CPU side and UC (PAT index 2) on GPU side
+ */
+ if (IS_METEORLAKE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
dst = i915_gem_object_pin_map_unlocked(obj,
i915_coherent_map_type(i915, obj, true));
if (IS_ERR(dst))
@@ -125,6 +132,12 @@ static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
memset(dst, 0, obj->base.size);
memcpy(dst, src, gsc->fw.size);
+ /*
+ * Wa_22016122933: make sure the data in dst is
+ * visible to GSC right away
+ */
+ intel_guc_write_barrier(&gt->uc.guc);
+
i915_gem_object_unpin_map(gsc->fw.obj);
i915_gem_object_unpin_map(obj);
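
The barrier added here pairs the firmware copy with an explicit flush before the GSC is handed the image. A userspace analogue of that ordering, with the WC mapping simulated by ordinary memory and intel_guc_write_barrier() modeled as a C11 release fence:

    #include <stdatomic.h>
    #include <string.h>

    static char shared[256];        /* stands in for the WC-mapped object */
    static atomic_int handed_off;   /* stands in for notifying the GSC    */

    /* caller guarantees len <= sizeof(shared) */
    static void publish(const void *src, size_t len)
    {
            memcpy(shared, src, len);
            /* flush the payload before signalling, as the
             * barrier above does on the real hardware */
            atomic_thread_fence(memory_order_release);
            atomic_store_explicit(&handed_off, 1, memory_order_relaxed);
    }
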
@@ -743,6 +743,13 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ /*
+ * Wa_22016122933: For MTL the shared memory needs to be mapped
+ * as WC on CPU side and UC (PAT index 2) on GPU side
+ */
+ if (IS_METEORLAKE(gt->i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma))
goto err;
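
This is the same two-line MTL check already added to gsc_fw_load_prepare() above. If more callers show up, it could be folded into a small helper; a hypothetical sketch, not part of this patch or the tree:

    /* Hypothetical helper: apply Wa_22016122933 to a freshly allocated
     * object so CPU maps come out WC and the GPU side goes uncached. */
    static void wa_22016122933_apply(struct drm_i915_private *i915,
                                     struct drm_i915_gem_object *obj)
    {
            if (IS_METEORLAKE(i915))
                    i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
    }
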
@@ -415,12 +415,6 @@ static int ct_write(struct intel_guc_ct *ct,
}
GEM_BUG_ON(tail > size);
- /*
- * make sure H2G buffer update and LRC tail update (if this triggering a
- * submission) are visible before updating the descriptor tail
- */
- intel_guc_write_barrier(ct_to_guc(ct));
-
/* update local copies */
ctb->tail = tail;
GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
@@ -429,6 +423,12 @@ static int ct_write(struct intel_guc_ct *ct,
/* now update descriptor */
WRITE_ONCE(desc->tail, tail);
+ /*
+ * make sure H2G buffer update and LRC tail update (if this triggers a
+ * submission) are visible before updating the descriptor tail
+ */
+ intel_guc_write_barrier(ct_to_guc(ct));
+
return 0;
corrupted:
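
Note the barrier did not disappear from ct_write(); it moved below the descriptor tail update. With the CTB in a WC mapping on MTL, the flush has to cover the tail value itself, otherwise the doorbell could reach the GuC while the new tail is still sitting in a CPU write-combining buffer. A userspace analogue of the corrected ordering, with illustrative names:

    /*
     * Userspace analogue of the reordered ct_write(): the fence now
     * follows the tail update, so both the payload and the new tail
     * are visible before the GuC is poked.
     */
    #include <stdatomic.h>
    #include <stdint.h>

    struct ring {
            uint32_t    buf[64];
            atomic_uint tail;
    };

    static atomic_uint doorbell;

    static void ring_write(struct ring *r, uint32_t val, uint32_t tail)
    {
            r->buf[tail % 64] = val;        /* H2G buffer update */
            atomic_store_explicit(&r->tail, tail, memory_order_relaxed);
            /* barrier after the tail write, mirroring the hunk above */
            atomic_thread_fence(memory_order_release);
            atomic_store_explicit(&doorbell, 1, memory_order_relaxed);
    }
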
@@ -902,6 +902,12 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
/* now update descriptor */
WRITE_ONCE(desc->head, head);
+ /*
+ * Wa_22016122933: make sure the head update is
+ * visible to GuC right away
+ */
+ intel_guc_write_barrier(ct_to_guc(ct));
+
return available - len;
corrupted:
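
The read side gets the matching flush after the head update, so the GuC sees the freed CTB space without waiting for the WC buffers to drain on their own. A consumer-side analogue in the same style, again with illustrative names and the hardware barrier modeled as a release fence:

    #include <stdatomic.h>
    #include <stdint.h>

    struct ring_desc { atomic_uint head; };

    static void ring_read_done(struct ring_desc *d, uint32_t head)
    {
            /* publish the new head ... */
            atomic_store_explicit(&d->head, head, memory_order_relaxed);
            /* ... and flush it, as the Wa_22016122933 barrier does */
            atomic_thread_fence(memory_order_release);
    }
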