@@ -300,7 +300,7 @@ intel_dpt_create(struct intel_framebuffer *fb)
 	vm->vma_ops.bind_vma = dpt_bind_vma;
 	vm->vma_ops.unbind_vma = dpt_unbind_vma;
 
-	vm->pte_encode = gen8_ggtt_pte_encode;
+	vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
 
 	dpt->obj = dpt_obj;
 	dpt->obj->is_dpt = true;
@@ -55,6 +55,34 @@ static u64 gen8_pte_encode(dma_addr_t addr,
 	return pte;
 }
 
+static u64 mtl_pte_encode(dma_addr_t addr,
+			  enum i915_cache_level level,
+			  u32 flags)
+{
+	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
+
+	if (unlikely(flags & PTE_READ_ONLY))
+		pte &= ~GEN8_PAGE_RW;
+
+	if (flags & PTE_LM)
+		pte |= GEN12_PPGTT_PTE_LM;
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		pte |= GEN12_PPGTT_PTE_PAT1;
+		break;
+	case I915_CACHE_LLC:
+	case I915_CACHE_L3_LLC:
+		pte |= GEN12_PPGTT_PTE_PAT0 | GEN12_PPGTT_PTE_PAT1;
+		break;
+	case I915_CACHE_WT:
+		pte |= GEN12_PPGTT_PTE_PAT0;
+		break;
+	}
+
+	return pte;
+}
+
 static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
 {
 	struct drm_i915_private *i915 = ppgtt->vm.i915;
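
For reference, the PAT selection above is spread across individual PTE bits rather than one contiguous field; going by the PAT0..PAT3 naming (bit values in the final header hunk of this patch), PAT1:PAT0 form the low bits of a PAT index, so the switch picks index 2 for I915_CACHE_NONE, 3 for LLC/L3_LLC and 1 for WT. A standalone sketch of the resulting PTE for one case, assuming GEN8_PAGE_PRESENT and GEN8_PAGE_RW are bits 0 and 1 (those two definitions are not shown in this diff):

#include <assert.h>
#include <stdint.h>

/* Values copied from the final header hunk; PRESENT/RW positions assumed. */
#define GEN8_PAGE_PRESENT	(1ULL << 0)
#define GEN8_PAGE_RW		(1ULL << 1)
#define GEN12_PPGTT_PTE_PAT0	(1ULL << 3)
#define GEN12_PPGTT_PTE_PAT1	(1ULL << 4)
#define GEN12_PPGTT_PTE_LM	(1ULL << 11)

int main(void)
{
	/* mtl_pte_encode(0x100000, I915_CACHE_LLC, PTE_LM): PAT index 3 */
	uint64_t pte = 0x100000ULL | GEN8_PAGE_PRESENT | GEN8_PAGE_RW |
		       GEN12_PPGTT_PTE_LM |
		       GEN12_PPGTT_PTE_PAT0 | GEN12_PPGTT_PTE_PAT1;

	assert(pte == 0x10081bULL);
	return 0;
}
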
@@ -427,7 +455,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
 		      u32 flags)
 {
 	struct i915_page_directory *pd;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	const gen8_pte_t pte_encode = ppgtt->vm.pte_encode(0, cache_level, flags);
 	gen8_pte_t *vaddr;
 
 	pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2));
@@ -580,7 +608,7 @@ static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
 				   enum i915_cache_level cache_level,
 				   u32 flags)
 {
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
+	const gen8_pte_t pte_encode = vm->pte_encode(0, cache_level, flags);
 	unsigned int rem = sg_dma_len(iter->sg);
 	u64 start = vma_res->start;
 
@@ -743,7 +771,7 @@ static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
 	GEM_BUG_ON(pt->is_compact);
 
 	vaddr = px_vaddr(pt);
-	vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
+	vaddr[gen8_pd_index(idx, 0)] = vm->pte_encode(addr, level, flags);
 	drm_clflush_virt_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
 }
 
@@ -773,7 +801,7 @@ static void __xehpsdv_ppgtt_insert_entry_lm(struct i915_address_space *vm,
 	}
 
 	vaddr = px_vaddr(pt);
-	vaddr[gen8_pd_index(idx, 0) / 16] = gen8_pte_encode(addr, level, flags);
+	vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, level, flags);
 }
 
 static void xehpsdv_ppgtt_insert_entry(struct i915_address_space *vm,
@@ -820,8 +848,8 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 		pte_flags |= PTE_LM;
 
 	vm->scratch[0]->encode =
-		gen8_pte_encode(px_dma(vm->scratch[0]),
-				I915_CACHE_NONE, pte_flags);
+		vm->pte_encode(px_dma(vm->scratch[0]),
+			       I915_CACHE_NONE, pte_flags);
 
 	for (i = 1; i <= vm->top; i++) {
 		struct drm_i915_gem_object *obj;
@@ -963,7 +991,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
 	 */
 	ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
 
-	ppgtt->vm.pte_encode = gen8_pte_encode;
+	if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+		ppgtt->vm.pte_encode = mtl_pte_encode;
+	else
+		ppgtt->vm.pte_encode = gen8_pte_encode;
 
 	ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
 	ppgtt->vm.insert_entries = gen8_ppgtt_insert;
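
The selection above (and the matching one in gen8_gmch_probe() further down) works because mtl_pte_encode() and gen8_pte_encode() share the signature stored in the vm's pte_encode hook, which is why the insert paths earlier in this patch switch from calling gen8_pte_encode() directly to calling the hook. A minimal sketch of that dispatch pattern, using hypothetical toy names rather than i915 types:

#include <stdint.h>

typedef uint64_t (*encode_fn)(uint64_t addr, unsigned int level, uint32_t flags);

struct toy_vm {
	encode_fn pte_encode;	/* bound once at create time */
};

/* Placeholder encoders standing in for gen8_pte_encode()/mtl_pte_encode(). */
static uint64_t toy_gen8_encode(uint64_t addr, unsigned int level, uint32_t flags)
{
	return addr | 1ULL | ((uint64_t)level << 3) | flags;
}

static uint64_t toy_mtl_encode(uint64_t addr, unsigned int level, uint32_t flags)
{
	return addr | 1ULL | ((uint64_t)level << 4) | flags;
}

static void toy_vm_create(struct toy_vm *vm, unsigned int ip_ver)
{
	/* mirrors the GRAPHICS_VER_FULL() >= IP_VER(12, 70) check above */
	vm->pte_encode = ip_ver >= 1270 ? toy_mtl_encode : toy_gen8_encode;
}

static uint64_t toy_insert_page(const struct toy_vm *vm, uint64_t addr)
{
	/* insert paths go through the hook, never a platform encoder */
	return vm->pte_encode(addr, 0, 0);
}

int main(void)
{
	struct toy_vm vm;

	toy_vm_create(&vm, 1270);	/* e.g. graphics IP 12.70 */
	return toy_insert_page(&vm, 0x1000) ? 0 : 1;
}
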
@@ -220,6 +220,33 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
 	}
 }
 
+static u64 mtl_ggtt_pte_encode(dma_addr_t addr,
+			       enum i915_cache_level level,
+			       u32 flags)
+{
+	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;
+
+	WARN_ON_ONCE(addr & ~GEN12_GGTT_PTE_ADDR_MASK);
+
+	if (flags & PTE_LM)
+		pte |= GEN12_GGTT_PTE_LM;
+
+	switch (level) {
+	case I915_CACHE_NONE:
+		pte |= MTL_GGTT_PTE_PAT1;
+		break;
+	case I915_CACHE_LLC:
+	case I915_CACHE_L3_LLC:
+		pte |= MTL_GGTT_PTE_PAT0 | MTL_GGTT_PTE_PAT1;
+		break;
+	case I915_CACHE_WT:
+		pte |= MTL_GGTT_PTE_PAT0;
+		break;
+	}
+
+	return pte;
+}
+
 u64 gen8_ggtt_pte_encode(dma_addr_t addr,
 			 enum i915_cache_level level,
 			 u32 flags)
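
Compared with the PPGTT variant, the GGTT encoder above keeps the PAT selector in the high PTE bits (MTL_GGTT_PTE_PAT0/PAT1 at bits 52 and 53 in the final header hunk), sets no RW bit, and warns if the address would not fit in the 45:12 address field. A hedged sketch of the reverse operation, decoding such a PTE, using only the masks this patch defines plus local stand-ins for BIT_ULL()/GENMASK_ULL(); the helper and struct are illustrative, not i915 code:

#include <stdbool.h>
#include <stdint.h>

#define BIT_ULL(n)		(1ULL << (n))
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define GEN8_PAGE_PRESENT	 BIT_ULL(0)	/* assumed bit 0 */
#define GEN12_GGTT_PTE_LM	 BIT_ULL(1)
#define GEN12_GGTT_PTE_ADDR_MASK GENMASK_ULL(45, 12)
#define MTL_GGTT_PTE_PAT_MASK	 GENMASK_ULL(53, 52)

struct mtl_ggtt_pte_fields {
	uint64_t addr;		/* bits 45:12 */
	unsigned int pat_index;	/* 1 = WT, 2 = NONE, 3 = LLC/L3_LLC per the switch above */
	bool lm;
	bool present;
};

static struct mtl_ggtt_pte_fields mtl_ggtt_pte_decode(uint64_t pte)
{
	struct mtl_ggtt_pte_fields f = {
		.addr = pte & GEN12_GGTT_PTE_ADDR_MASK,
		.pat_index = (unsigned int)((pte & MTL_GGTT_PTE_PAT_MASK) >> 52),
		.lm = pte & GEN12_GGTT_PTE_LM,
		.present = pte & GEN8_PAGE_PRESENT,
	};

	return f;
}

int main(void)
{
	/* e.g. a present, local-memory page at 0x42000 with PAT index 3 */
	struct mtl_ggtt_pte_fields f =
		mtl_ggtt_pte_decode(0x42000ULL | BIT_ULL(0) | BIT_ULL(1) |
				    MTL_GGTT_PTE_PAT_MASK);

	return f.present && f.lm && f.addr == 0x42000ULL && f.pat_index == 3 ? 0 : 1;
}
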
@@ -247,7 +274,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 	gen8_pte_t __iomem *pte =
 		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
 
-	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));
+	gen8_set_pte(pte, ggtt->vm.pte_encode(addr, level, flags));
 
 	ggtt->invalidate(ggtt);
 }
@@ -257,8 +284,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     enum i915_cache_level level,
 				     u32 flags)
 {
-	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+	const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, level, flags);
 	gen8_pte_t __iomem *gte;
 	gen8_pte_t __iomem *end;
 	struct sgt_iter iter;
@@ -981,7 +1008,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
 	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
 
-	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
+	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
+		ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
+	else
+		ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
 
 	return ggtt_probe_common(ggtt, size);
 }
@@ -88,9 +88,17 @@ typedef u64 gen8_pte_t;
 #define BYT_PTE_SNOOPED_BY_CPU_CACHES	REG_BIT(2)
 #define BYT_PTE_WRITEABLE		REG_BIT(1)
 
+#define MTL_PPGTT_PTE_PAT3	BIT_ULL(62)
 #define GEN12_PPGTT_PTE_LM	BIT_ULL(11)
-
-#define GEN12_GGTT_PTE_LM	BIT_ULL(1)
+#define GEN12_PPGTT_PTE_PAT2	BIT_ULL(7)
+#define GEN12_PPGTT_PTE_PAT1	BIT_ULL(4)
+#define GEN12_PPGTT_PTE_PAT0	BIT_ULL(3)
+
+#define GEN12_GGTT_PTE_LM		BIT_ULL(1)
+#define MTL_GGTT_PTE_PAT0		BIT_ULL(52)
+#define MTL_GGTT_PTE_PAT1		BIT_ULL(53)
+#define GEN12_GGTT_PTE_ADDR_MASK	GENMASK_ULL(45, 12)
+#define MTL_GGTT_PTE_PAT_MASK		GENMASK_ULL(53, 52)
 
 #define GEN12_PDE_64K BIT(6)
 #define GEN12_PTE_PS64 BIT(8)
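
Finally, a quick consistency check on the new GGTT definitions: the PAT field, the LM bit and the address field must occupy disjoint PTE bits, and MTL_GGTT_PTE_PAT_MASK must cover exactly PAT0 and PAT1. A standalone sketch with local stand-ins for the kernel's BIT_ULL()/GENMASK_ULL() helpers (the checks themselves are not part of the patch):

#include <assert.h>

#define BIT_ULL(n)		(1ULL << (n))
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define GEN12_GGTT_PTE_LM	 BIT_ULL(1)
#define MTL_GGTT_PTE_PAT0	 BIT_ULL(52)
#define MTL_GGTT_PTE_PAT1	 BIT_ULL(53)
#define GEN12_GGTT_PTE_ADDR_MASK GENMASK_ULL(45, 12)
#define MTL_GGTT_PTE_PAT_MASK	 GENMASK_ULL(53, 52)

static_assert((MTL_GGTT_PTE_PAT_MASK & GEN12_GGTT_PTE_ADDR_MASK) == 0,
	      "PAT field overlaps the address field");
static_assert((MTL_GGTT_PTE_PAT_MASK & GEN12_GGTT_PTE_LM) == 0,
	      "PAT field overlaps the LM bit");
static_assert(MTL_GGTT_PTE_PAT_MASK == (MTL_GGTT_PTE_PAT0 | MTL_GGTT_PTE_PAT1),
	      "PAT mask must cover exactly PAT0 and PAT1");

int main(void) { return 0; }
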