@@ -70,10 +70,9 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
return has_full_ppgtt ? 2 : has_aliasing_ppgtt ? 1 : 0;
}
-
-static void ppgtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
+static int ppgtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
@@ -231,37 +230,78 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
(px)->page, 0, 4096, \
PCI_DMA_BIDIRECTIONAL))
-static void free_pt_single(struct i915_pagetab *pt, struct drm_device *dev)
+static void __free_pt_single(struct i915_pagetab *pt, struct drm_device *dev,
+ int scratch)
{
+ if (WARN(scratch ^ pt->scratch,
+ "Tried to free scratch = %d. Is scratch = %d\n",
+ scratch, pt->scratch))
+ return;
+
if (WARN_ON(!pt->page))
return;
+ if (!scratch) {
+ const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+ GEN8_PTES_PER_PT : GEN6_PTES_PER_PT;
+ WARN(!bitmap_empty(pt->used_ptes, count),
+		     "Freeing page table with %d used pages\n",
+ bitmap_weight(pt->used_ptes, count));
+ }
+
i915_dma_unmap_single(pt, dev);
__free_page(pt->page);
+ kfree(pt->used_ptes);
kfree(pt);
}
+#define free_pt_single(pt, dev) \
+ __free_pt_single(pt, dev, false)
+#define free_pt_scratch(pt, dev) \
+ __free_pt_single(pt, dev, true)
+
static struct i915_pagetab *alloc_pt_single(struct drm_device *dev)
{
struct i915_pagetab *pt;
- int ret;
+ const size_t count = INTEL_INFO(dev)->gen >= 8 ?
+ GEN8_PTES_PER_PT : GEN6_PTES_PER_PT;
+ int ret = -ENOMEM;
pt = kzalloc(sizeof(*pt), GFP_KERNEL);
if (!pt)
return ERR_PTR(-ENOMEM);
+ pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
+ GFP_KERNEL);
+
+ if (!pt->used_ptes)
+ goto fail_bitmap;
+
pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!pt->page) {
- kfree(pt);
- return ERR_PTR(-ENOMEM);
- }
+ if (!pt->page)
+ goto fail_page;
ret = i915_dma_map_px_single(pt, dev);
- if (ret) {
- __free_page(pt->page);
- kfree(pt);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto fail_dma;
+
+ return pt;
+
+fail_dma:
+ __free_page(pt->page);
+fail_page:
+ kfree(pt->used_ptes);
+fail_bitmap:
+ kfree(pt);
+
+ return ERR_PTR(ret);
+}
+
+static inline struct i915_pagetab *alloc_pt_scratch(struct drm_device *dev)
+{
+ struct i915_pagetab *pt = alloc_pt_single(dev);
+ if (!IS_ERR(pt))
+ pt->scratch = 1;
return pt;
}
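
Note (illustration, not from this patch): the new used_ptes field is an ordinary Linux bitmap, one bit per PTE. A minimal sketch of the sizing and of the emptiness check __free_pt_single() performs above; the demo_* names are invented for the sketch:

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/bug.h>

/* One bit per PTE; BITS_TO_LONGS() rounds up so the bitmap helpers can
 * operate on whole unsigned longs. */
static unsigned long *demo_alloc_pte_bitmap(size_t count)
{
	return kcalloc(BITS_TO_LONGS(count), sizeof(unsigned long), GFP_KERNEL);
}

static void demo_free_pte_bitmap(unsigned long *used_ptes, size_t count)
{
	/* Same idea as the WARN in __free_pt_single(): freeing a table that
	 * still tracks live PTEs is a driver bug worth flagging. */
	WARN(!bitmap_empty(used_ptes, count),
	     "Freeing page table with %d used pages\n",
	     bitmap_weight(used_ptes, count));
	kfree(used_ptes);
}
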
@@ -380,7 +420,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
int used_pd = ppgtt->num_pd_entries / I915_PDES_PER_PD;
for (i = used_pd - 1; i >= 0; i--) {
- dma_addr_t addr = ppgtt->pdp.pagedir[i]->daddr;
+ dma_addr_t addr = ppgtt->pdp.pagedirs[i]->daddr;
ret = gen8_write_pdp(ring, i, addr);
if (ret)
return ret;
@@ -407,7 +447,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
I915_CACHE_LLC, use_scratch);
while (num_entries) {
- struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
+ struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
struct i915_pagetab *pt = pd->page_tables[pde];
struct page *page_table = pt->page;
@@ -454,7 +494,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
break;
if (pt_vaddr == NULL) {
- struct i915_pagedir *pd = ppgtt->pdp.pagedir[pdpe];
+ struct i915_pagedir *pd = ppgtt->pdp.pagedirs[pdpe];
struct i915_pagetab *pt = pd->page_tables[pde];
struct page *page_table = pt->page;
pt_vaddr = kmap_atomic(page_table);
@@ -500,8 +540,8 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
int i;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
- free_pd_single(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
+ gen8_free_page_tables(ppgtt->pdp.pagedirs[i], ppgtt->base.dev);
+ free_pd_single(ppgtt->pdp.pagedirs[i], ppgtt->base.dev);
}
}
@@ -518,7 +558,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
int i, ret;
for (i = 0; i < ppgtt->num_pd_pages; i++) {
- ret = alloc_pt_range(ppgtt->pdp.pagedir[i],
+ ret = alloc_pt_range(ppgtt->pdp.pagedirs[i],
0, I915_PDES_PER_PD, ppgtt->base.dev);
if (ret)
goto unwind_out;
@@ -528,7 +568,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
unwind_out:
while (i--)
- gen8_free_page_tables(ppgtt->pdp.pagedir[i], ppgtt->base.dev);
+ gen8_free_page_tables(ppgtt->pdp.pagedirs[i], ppgtt->base.dev);
return -ENOMEM;
}
@@ -539,8 +579,8 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
int i;
for (i = 0; i < max_pdp; i++) {
- ppgtt->pdp.pagedir[i] = alloc_pd_single(ppgtt->base.dev);
- if (IS_ERR(ppgtt->pdp.pagedir[i]))
+ ppgtt->pdp.pagedirs[i] = alloc_pd_single(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->pdp.pagedirs[i]))
goto unwind_out;
}
@@ -551,7 +591,7 @@ static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
unwind_out:
while (i--)
- free_pd_single(ppgtt->pdp.pagedir[i],
+ free_pd_single(ppgtt->pdp.pagedirs[i],
ppgtt->base.dev);
return -ENOMEM;
@@ -613,9 +653,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
* will never need to touch the PDEs again.
*/
for (i = 0; i < max_pdp; i++) {
- struct i915_pagedir *pd = ppgtt->pdp.pagedir[i];
+ struct i915_pagedir *pd = ppgtt->pdp.pagedirs[i];
gen8_ppgtt_pde_t *pd_vaddr;
- pd_vaddr = kmap_atomic(ppgtt->pdp.pagedir[i]->page);
+ pd_vaddr = kmap_atomic(ppgtt->pdp.pagedirs[i]->page);
for (j = 0; j < I915_PDES_PER_PD; j++) {
struct i915_pagetab *pt = pd->page_tables[j];
dma_addr_t addr = pt->daddr;
@@ -713,15 +753,13 @@ static void gen6_map_single(struct i915_pagedir *pd,
/* Map all the page tables found in the ppgtt structure to incrementing page
* directories. */
static void gen6_map_page_range(struct drm_i915_private *dev_priv,
- struct i915_pagedir *pd, unsigned pde, size_t n)
+ struct i915_pagedir *pd, uint32_t start, uint32_t length)
{
- if (WARN_ON(pde + n > I915_PDES_PER_PD))
- n = I915_PDES_PER_PD - pde;
-
- n += pde;
+ struct i915_pagetab *pt;
+ uint32_t pde, temp;
- for (; pde < n; pde++)
- gen6_map_single(pd, pde, pd->page_tables[pde]);
+ gen6_for_each_pde(pt, pd, start, length, temp, pde)
+ gen6_map_single(pd, pde, pt);
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
@@ -927,6 +965,51 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
kunmap_atomic(pt_vaddr);
}
+static int gen6_alloc_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_pagetab *pt;
+ uint32_t pde, temp;
+
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ int j;
+
+ DECLARE_BITMAP(tmp_bitmap, GEN6_PTES_PER_PT);
+ bitmap_zero(tmp_bitmap, GEN6_PTES_PER_PT);
+ bitmap_set(tmp_bitmap, gen6_pte_index(start),
+ gen6_pte_count(start, length));
+
+		/* TODO: to be done in the next patch: map the page and
+		 * insert the entries here. */
+ for_each_set_bit(j, tmp_bitmap, GEN6_PTES_PER_PT) {
+ if (test_bit(j, pt->used_ptes)) {
+ /* Check that we're changing cache levels */
+ }
+ }
+
+ bitmap_or(pt->used_ptes, pt->used_ptes, tmp_bitmap,
+ GEN6_PTES_PER_PT);
+ }
+
+ return 0;
+}
+
+static void gen6_teardown_va_range(struct i915_address_space *vm,
+ uint64_t start, uint64_t length)
+{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
+ struct i915_pagetab *pt;
+ uint32_t pde, temp;
+
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
+ bitmap_clear(pt->used_ptes, gen6_pte_index(start),
+ gen6_pte_count(start, length));
+ }
+}
+
static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
int i;
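
Note (illustration, not from this patch): gen6_alloc_va_range() and gen6_teardown_va_range() rely on gen6_pte_index() and gen6_pte_count(), which are defined elsewhere in the series and not shown in this excerpt. A rough sketch of what they are assumed to compute, taking GEN6_PDE_SHIFT = 22 (each PDE spans 4MB of GTT) and GEN6_PTES_PER_PT = 1024; the demo_* names are invented and the real helpers may differ:

/* Index of the PTE, within its page table, that covers addr. */
static inline uint32_t demo_gen6_pte_index(uint64_t addr)
{
	return (addr >> PAGE_SHIFT) & (GEN6_PTES_PER_PT - 1);
}

/* Number of PTEs the range [addr, addr + length) occupies in the page table
 * containing addr, clamped so the count never spills into the next PDE. */
static inline uint32_t demo_gen6_pte_count(uint64_t addr, uint64_t length)
{
	const uint64_t pde_span = 1ULL << GEN6_PDE_SHIFT;
	uint64_t end = min(addr + length, ALIGN(addr + 1, pde_span));

	return (end - addr) >> PAGE_SHIFT;
}
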
@@ -934,6 +1017,7 @@ static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
for (i = 0; i < ppgtt->num_pd_entries; i++)
free_pt_single(ppgtt->pd.page_tables[i], ppgtt->base.dev);
+ free_pt_scratch(ppgtt->scratch_pt, ppgtt->base.dev);
free_pd_single(&ppgtt->pd, ppgtt->base.dev);
}
@@ -959,6 +1043,9 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ ppgtt->scratch_pt = alloc_pt_scratch(ppgtt->base.dev);
+ if (IS_ERR(ppgtt->scratch_pt))
+ return PTR_ERR(ppgtt->scratch_pt);
alloc:
ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
@@ -972,20 +1059,25 @@ alloc:
0, dev_priv->gtt.base.total,
0);
if (ret)
- return ret;
+ goto err_out;
retried = true;
goto alloc;
}
if (ret)
- return ret;
+ goto err_out;
+
if (ppgtt->node.start < dev_priv->gtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
ppgtt->num_pd_entries = I915_PDES_PER_PD;
return 0;
+
+err_out:
+ free_pt_scratch(ppgtt->scratch_pt, ppgtt->base.dev);
+ return ret;
}
static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
@@ -1027,6 +1119,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
+ ppgtt->base.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.teardown_va_range = gen6_teardown_va_range;
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
@@ -1040,7 +1134,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t);
- gen6_map_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->num_pd_entries);
+ gen6_map_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
ppgtt->node.size >> 20,
@@ -1156,17 +1250,28 @@ void i915_ppgtt_release(struct kref *kref)
kfree(ppgtt);
}
-static void
+static int
ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
{
+ int ret;
+
/* Currently applicable only to VLV */
if (vma->obj->gt_ro)
flags |= PTE_READ_ONLY;
+ if (vma->vm->allocate_va_range) {
+ ret = vma->vm->allocate_va_range(vma->vm,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ return ret;
+ }
+
vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
cache_level, flags);
+ return 0;
}
static void ppgtt_unbind_vma(struct i915_vma *vma)
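
Note (illustration, not from this patch): because bind_vma() can now fail, call sites elsewhere in the driver (not shown here) have to check and propagate the error rather than assume the mapping exists. A hypothetical caller-side shape:

static int demo_bind_vma_checked(struct i915_vma *vma,
				 enum i915_cache_level cache_level, u32 flags)
{
	int ret = vma->bind_vma(vma, cache_level, flags);

	/* On failure the page tables were not (fully) allocated, so the
	 * object must not be used at this GPU address. */
	if (ret)
		return ret;

	return 0;
}
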
@@ -1175,6 +1280,9 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
vma->node.start,
vma->obj->base.size,
true);
+ if (vma->vm->teardown_va_range)
+ vma->vm->teardown_va_range(vma->vm,
+ vma->node.start, vma->node.size);
}
extern int intel_iommu_gfx_mapped;
@@ -1495,9 +1603,9 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
}
-static void i915_ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 unused)
+static int i915_ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
const unsigned long entry = vma->node.start >> PAGE_SHIFT;
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
@@ -1506,6 +1614,8 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
BUG_ON(!i915_is_ggtt(vma->vm));
intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
vma->obj->has_global_gtt_mapping = 1;
+
+ return 0;
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1528,9 +1638,9 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
intel_gtt_clear_range(first, size);
}
-static void ggtt_bind_vma(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static int ggtt_bind_vma(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1562,7 +1672,7 @@ static void ggtt_bind_vma(struct i915_vma *vma,
}
if (!(flags & ALIASING_BIND))
- return;
+ return 0;
if (dev_priv->mm.aliasing_ppgtt &&
(!obj->has_aliasing_ppgtt_mapping ||
@@ -1574,6 +1684,8 @@ static void ggtt_bind_vma(struct i915_vma *vma,
cache_level, flags);
vma->obj->has_aliasing_ppgtt_mapping = 1;
}
+
+ return 0;
}
static void ggtt_unbind_vma(struct i915_vma *vma)
@@ -172,9 +172,33 @@ struct i915_vma {
/* Only use this if you know you want a strictly aliased binding */
#define ALIASING_BIND (1<<1)
#define PTE_READ_ONLY (1<<2)
- void (*bind_vma)(struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags);
+ int (*bind_vma)(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags);
+};
+
+
+struct i915_pagetab {
+ struct page *page;
+ dma_addr_t daddr;
+
+ unsigned long *used_ptes;
+ unsigned int scratch:1;
+};
+
+struct i915_pagedir {
+ struct page *page; /* NULL for GEN6-GEN7 */
+ union {
+ uint32_t pd_offset;
+ dma_addr_t daddr;
+ };
+
+ struct i915_pagetab *page_tables[I915_PDES_PER_PD];
+};
+
+struct i915_pagedirpo {
+ /* struct page *page; */
+ struct i915_pagedir *pagedirs[GEN8_LEGACY_PDPES];
};
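
Note (illustration, not from this patch): the three structs mirror the gen8 page walk, i915_pagedirpo -> pagedirs[] -> page_tables[] -> individual PTEs. A rough sketch of how a GPU virtual address is assumed to decompose into those indices, using the conventional gen8 shifts (PDPE at bit 30, PDE at bit 21, PTE at bit 12); the driver's own index helpers are defined elsewhere in the series:

/* Illustrative only; not the driver's helpers. */
static inline void demo_gen8_addr_decompose(uint64_t addr, uint32_t *pdpe,
					    uint32_t *pde, uint32_t *pte)
{
	*pdpe = (addr >> 30) & (GEN8_LEGACY_PDPES - 1);	/* pdp.pagedirs[] slot */
	*pde  = (addr >> 21) & (I915_PDES_PER_PD - 1);	/* pd->page_tables[] slot */
	*pte  = (addr >> 12) & (GEN8_PTES_PER_PT - 1);	/* PTE within the table */
}
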
struct i915_address_space {
@@ -216,6 +240,12 @@ struct i915_address_space {
gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
bool valid, u32 flags); /* Create a valid PTE */
+ int (*allocate_va_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length);
+ void (*teardown_va_range)(struct i915_address_space *vm,
+ uint64_t start,
+ uint64_t length);
void (*clear_range)(struct i915_address_space *vm,
uint64_t start,
uint64_t length,
@@ -227,6 +257,29 @@ struct i915_address_space {
void (*cleanup)(struct i915_address_space *vm);
};
+struct i915_hw_ppgtt {
+ struct i915_address_space base;
+ struct kref ref;
+ struct drm_mm_node node;
+ unsigned num_pd_entries;
+ unsigned num_pd_pages; /* gen8+ */
+ union {
+ struct i915_pagedirpo pdp;
+ struct i915_pagedir pd;
+ };
+
+ struct i915_pagetab *scratch_pt;
+
+ struct drm_i915_file_private *file_priv;
+
+ gen6_gtt_pte_t __iomem *pd_addr;
+
+ int (*enable)(struct i915_hw_ppgtt *ppgtt);
+ int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
+ struct intel_engine_cs *ring);
+ void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
+};
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -255,46 +308,22 @@ struct i915_gtt {
unsigned long *mappable_end);
};
-struct i915_pagetab {
- struct page *page;
- dma_addr_t daddr;
-};
-
-struct i915_pagedir {
- struct page *page; /* NULL for GEN6-GEN7 */
- union {
- uint32_t pd_offset;
- dma_addr_t daddr;
- };
-
- struct i915_pagetab *page_tables[I915_PDES_PER_PD]; /* PDEs */
-};
-
-struct i915_pagedirpo {
- /* struct page *page; */
- struct i915_pagedir *pagedir[GEN8_LEGACY_PDPES];
-};
-
-struct i915_hw_ppgtt {
- struct i915_address_space base;
- struct kref ref;
- struct drm_mm_node node;
- unsigned num_pd_entries;
- unsigned num_pd_pages; /* gen8+ */
- union {
- struct i915_pagedirpo pdp;
- struct i915_pagedir pd;
- };
-
- struct drm_i915_file_private *file_priv;
-
- gen6_gtt_pte_t __iomem *pd_addr;
-
- int (*enable)(struct i915_hw_ppgtt *ppgtt);
- int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
- struct intel_engine_cs *ring);
- void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
-};
+/* Iterates over every pde covering the range [start, start + length). If
+ * start and start + length do not fall on pde boundaries, the macro rounds
+ * down and up as needed so partially covered page tables are still visited.
+ * The macro modifies pde, start, and length; temp is a scratch variable for
+ * the macro's internal arithmetic. On gen6/7, start = 0 and length = 2GB
+ * effectively iterates over every PDE in the system. On gen8+ it simply
+ * iterates over every page directory entry in a page directory.
+ *
+ * XXX: temp is not strictly needed, but it saves recomputing the ALIGN.
+ */
+#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \
+	for (iter = gen6_pde_index(start); \
+	     length > 0 && iter < I915_PDES_PER_PD && \
+		(pt = (pd)->page_tables[iter], 1); \
+	     iter++, \
+	     temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \
+	     temp = min(temp, (unsigned)length), \
+	     start += temp, length -= temp)
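
Note (illustration, not from this patch): a worked example of the walk, assuming a ppgtt in scope. On gen6/7 each PDE spans 4MB (GEN6_PDE_SHIFT = 22), so a range that straddles a 4MB boundary visits two page tables, and gen6_pte_count() clamps each step to its own table:

uint64_t start = 5ULL << 20;	/* 5MB, begins in pde 1 */
uint64_t length = 6ULL << 20;	/* 6MB, ends at 11MB in pde 2 */
uint32_t pde, temp;
struct i915_pagetab *pt;

gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
	/* Iteration 1: pde = 1, start = 5MB, length = 6MB;
	 *   gen6_pte_count() clamps at the 8MB boundary -> 768 PTEs.
	 * Iteration 2: pde = 2, start = 8MB, length = 3MB;
	 *   the remaining 768 PTEs, after which length reaches 0. */
	DRM_DEBUG("pde %u: %u ptes\n", pde, gen6_pte_count(start, length));
}
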
static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
@@ -1719,14 +1719,14 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
reg_state[CTX_PDP1_LDW] = GEN8_RING_PDP_LDW(ring, 1);
reg_state[CTX_PDP0_UDW] = GEN8_RING_PDP_UDW(ring, 0);
reg_state[CTX_PDP0_LDW] = GEN8_RING_PDP_LDW(ring, 0);
- reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[3]->daddr);
- reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[3]->daddr);
- reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[2]->daddr);
- reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[2]->daddr);
- reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[1]->daddr);
- reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[1]->daddr);
- reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.pagedir[0]->daddr);
- reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.pagedir[0]->daddr);
+ reg_state[CTX_PDP3_UDW+1] = upper_32_bits(ppgtt->pdp.pagedirs[3]->daddr);
+ reg_state[CTX_PDP3_LDW+1] = lower_32_bits(ppgtt->pdp.pagedirs[3]->daddr);
+ reg_state[CTX_PDP2_UDW+1] = upper_32_bits(ppgtt->pdp.pagedirs[2]->daddr);
+ reg_state[CTX_PDP2_LDW+1] = lower_32_bits(ppgtt->pdp.pagedirs[2]->daddr);
+ reg_state[CTX_PDP1_UDW+1] = upper_32_bits(ppgtt->pdp.pagedirs[1]->daddr);
+ reg_state[CTX_PDP1_LDW+1] = lower_32_bits(ppgtt->pdp.pagedirs[1]->daddr);
+ reg_state[CTX_PDP0_UDW+1] = upper_32_bits(ppgtt->pdp.pagedirs[0]->daddr);
+ reg_state[CTX_PDP0_LDW+1] = lower_32_bits(ppgtt->pdp.pagedirs[0]->daddr);
if (ring->id == RCS) {
reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
reg_state[CTX_R_PWR_CLK_STATE] = 0x20c8;