With every page of physical data we store: its physical address, the PTE that points to it, and the physical address of the PTE itself. We will be using all this information later.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_aubcrash.c  | 46 +++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_aubcrash.h  | 11 +++++++++
 drivers/gpu/drm/i915/i915_drv.h       |  7 +++++-
 drivers/gpu/drm/i915/i915_gpu_error.c | 24 ++++++++++++------
 4 files changed, 79 insertions(+), 9 deletions(-)

--- a/drivers/gpu/drm/i915/i915_aubcrash.c
+++ b/drivers/gpu/drm/i915/i915_aubcrash.c
@@ -209,6 +209,52 @@ void i915_error_free_ppgtt(struct i915_gpu_state *error, int idx)
 	free_page((unsigned long)e_pml4->storage);
 }
 
+void i915_error_page_walk(struct i915_address_space *vm,
+			  u64 offset,
+			  gen8_pte_t *entry,
+			  phys_addr_t *paddr)
+{
+	if (i915_is_ggtt(vm)) {
+		struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+		uint index = offset >> PAGE_SHIFT;
+
+		gen8_pte_t __iomem *pte =
+			(gen8_pte_t __iomem *)ggtt->gsm + index;
+
+		*entry = readq(pte);
+		*paddr = ggtt->gsm_paddr + index * sizeof(u64);
+	} else {
+		struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+		struct i915_pml4 *pml4;
+		struct i915_page_directory_pointer *pdp;
+		struct i915_page_directory *pd;
+		struct i915_page_table *pt;
+		u32 pml4e, pdpe, pde, pte;
+		u64 *vaddr;
+
+		pml4e = gen8_pml4e_index(offset);
+		if (i915_vm_is_48bit(&ppgtt->base)) {
+			pml4 = &ppgtt->pml4;
+			pdp = pml4->pdps[pml4e];
+		} else {
+			GEM_BUG_ON(pml4e != 0);
+			pdp = &ppgtt->pdp;
+		}
+
+		pdpe = gen8_pdpe_index(offset);
+		pd = pdp->page_directory[pdpe];
+
+		pde = gen8_pde_index(offset);
+		pt = pd->page_table[pde];
+
+		pte = gen8_pte_index(offset);
+		vaddr = kmap_atomic(px_base(pt)->page);
+		*entry = vaddr[pte];
+		kunmap_atomic(vaddr);
+		*paddr = px_dma(pt) + pte * sizeof(u64);
+	}
+}
+
 int i915_error_state_to_aub(struct drm_i915_error_state_buf *m,
 			    const struct i915_gpu_state *error)
 {
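
For reference, the ppGTT branch above is a software replay of the hardware's
4-level walk: bits 12-20 of the GPU virtual address index the page table,
bits 21-29 the page directory, bits 30-38 the PDP and bits 39-47 the PML4.
A minimal standalone sketch of the index math behind the gen8_*_index()
helpers (the 9-bits-per-level split on a 4KiB page is the gen8 layout; the
names here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define GEN8_PAGE_SHIFT	12
#define GEN8_LVL_BITS	9
#define GEN8_LVL_MASK	((1u << GEN8_LVL_BITS) - 1)

/* level 0 = pte, 1 = pde, 2 = pdpe, 3 = pml4e */
static uint32_t level_index(uint64_t offset, int level)
{
	return (offset >> (GEN8_PAGE_SHIFT + level * GEN8_LVL_BITS)) &
	       GEN8_LVL_MASK;
}

int main(void)
{
	uint64_t offset = 0x0000123456789000ULL;

	printf("pml4e=%u pdpe=%u pde=%u pte=%u\n",
	       level_index(offset, 3), level_index(offset, 2),
	       level_index(offset, 1), level_index(offset, 0));
	return 0;
}
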

--- a/drivers/gpu/drm/i915/i915_aubcrash.h
+++ b/drivers/gpu/drm/i915/i915_aubcrash.h
@@ -30,6 +30,10 @@ void i915_error_record_ppgtt(struct i915_gpu_state *error,
 			     struct i915_address_space *vm,
 			     int idx);
 void i915_error_free_ppgtt(struct i915_gpu_state *error, int idx);
+void i915_error_page_walk(struct i915_address_space *vm,
+			  u64 offset,
+			  gen8_pte_t *entry,
+			  phys_addr_t *paddr);
 int i915_error_state_to_aub(struct drm_i915_error_state_buf *m,
 			    const struct i915_gpu_state *error);
@@ -45,6 +49,13 @@ static inline void i915_error_free_ppgtt(struct i915_gpu_state *error, int idx)
 {
 }
 
+static inline void i915_error_page_walk(struct i915_address_space *vm,
+					u64 offset,
+					gen8_pte_t *entry,
+					phys_addr_t *paddr)
+{
+}
+
 static inline int i915_error_state_to_aub(struct drm_i915_error_state_buf *m,
 					  const struct i915_gpu_state *error)
 {

--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -995,7 +995,12 @@ struct i915_gpu_state {
 			u32 tiling:2;
 			int page_count;
 			int unused;
-			u32 *pages[0];
+			struct drm_i915_error_page {
+				phys_addr_t pte_paddr;
+				gen8_pte_t pte;
+				phys_addr_t paddr;
+				u32 *storage;
+			} pages[0];
 		} *ringbuffer, *batchbuffer, *wa_batchbuffer,
 		  *renderstate, *ctx, *hws_page;
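
The new fields turn each saved page into a (contents, PA, PTE, PTE PA)
tuple. As a hypothetical illustration of how a later consumer could use
them (the helper below is made up; only the struct fields come from this
patch):

static void print_error_page(struct drm_i915_error_state_buf *m,
			     const struct drm_i915_error_page *page)
{
	/* paddr: where the data lives; pte_paddr: where its PTE lives */
	err_printf(m, "page PA 0x%llx, PTE 0x%llx (PTE at PA 0x%llx)\n",
		   (unsigned long long)page->paddr,
		   (unsigned long long)page->pte,
		   (unsigned long long)page->pte_paddr);
}
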

--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -179,7 +179,7 @@ static void i915_error_puts(struct drm_i915_error_state_buf *e, const char *str)
 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
 #define err_puts(e, s) i915_error_puts(e, s)
 
-#ifdef CONFIG_DRM_I915_COMPRESS_ERROR
+#if defined(CONFIG_DRM_I915_COMPRESS_ERROR) && !defined(CONFIG_DRM_I915_AUB_CRASH_DUMP)
 
 struct compress {
 	struct z_stream_s zstream;
@@ -227,7 +227,7 @@ static int compress_page(struct compress *c,
 	if (!page)
 		return -ENOMEM;
 
-	dst->pages[dst->page_count++] = (void *)page;
+	dst->pages[dst->page_count++].storage = (void *)page;
 
 	zstream->next_out = (void *)page;
 	zstream->avail_out = PAGE_SIZE;
@@ -290,7 +290,7 @@ static int compress_page(struct compress *c,
 	ptr = (void *)page;
 	if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE))
 		memcpy(ptr, src, PAGE_SIZE);
-	dst->pages[dst->page_count++] = ptr;
+	dst->pages[dst->page_count++].storage = ptr;
 
 	return 0;
 }
@@ -539,7 +539,7 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
 		len = ascii85_encode_len(len);
 
 		for (i = 0; i < len; i++) {
-			if (ascii85_encode(obj->pages[page][i], out))
+			if (ascii85_encode(obj->pages[page].storage[i], out))
 				err_puts(m, out);
 			else
 				err_puts(m, "z");
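
The loop above emits one ascii85 group per 32-bit word, with the driver's
ascii85_encode() returning NULL for an all-zero word so that "z" (the
ascii85 zero shorthand) is printed instead. A standalone sketch of that
encoding step, with the zero case folded into one helper (illustrative,
not the driver's implementation):

#include <stdint.h>

static const char *ascii85_word(uint32_t in, char out[6])
{
	int i;

	if (in == 0)
		return "z";	/* four zero bytes shrink to one char */

	out[5] = '\0';
	for (i = 4; i >= 0; i--) {	/* most significant digit first */
		out[i] = '!' + in % 85;
		in /= 85;
	}
	return out;
}
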
@@ -827,7 +827,7 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
 		return;
 
 	for (page = 0; page < obj->page_count; page++)
-		free_page((unsigned long)obj->pages[page]);
+		free_page((unsigned long)obj->pages[page].storage);
 
 	kfree(obj);
 }
@@ -901,7 +901,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
 	num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT;
 	num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */
-	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *),
+	dst = kmalloc(sizeof(*dst) + num_pages * sizeof(*dst->pages),
 		      GFP_ATOMIC | __GFP_NOWARN);
 	if (!dst)
 		return NULL;
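
Two sizing details in this hunk: the 10/8 factor leaves 25% headroom over
the uncompressed size, far above zlib's documented worst-case expansion on
incompressible input (a fraction of a percent plus a small fixed header),
and sizeof(*dst->pages) keeps the flexible-array allocation in sync now
that an element is a struct rather than a u32 pointer. A minimal sketch of
that allocation idiom, with made-up names:

#include <linux/slab.h>
#include <linux/types.h>

struct page_log {
	int count;
	struct page_rec {
		phys_addr_t paddr;
		u32 *storage;
	} recs[];
};

static struct page_log *page_log_alloc(int n)
{
	struct page_log *log;

	/* sizeof(*log->recs), not sizeof(u32 *): survives element changes */
	log = kmalloc(sizeof(*log) + n * sizeof(*log->recs),
		      GFP_ATOMIC | __GFP_NOWARN);
	if (log)
		log->count = 0;
	return log;
}
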
@@ -924,6 +924,14 @@ void __i915_gpu_state_free(struct kref *error_ref)
 		ggtt->base.insert_page(&ggtt->base, dma, slot,
 				       I915_CACHE_NONE, 0);
 
+		if (INTEL_GEN(i915) >= 8) {
+			dst->pages[dst->page_count].paddr = dma;
+			i915_error_page_walk(vma->vm, dst->gtt_offset +
+					     dst->page_count * PAGE_SIZE,
+					     &dst->pages[dst->page_count].pte,
+					     &dst->pages[dst->page_count].pte_paddr);
+		}
+
 		s = io_mapping_map_atomic_wc(&ggtt->mappable, slot);
 		ret = compress_page(&compress, (void __force *)s, dst);
 		io_mapping_unmap_atomic(s);
@@ -935,7 +943,7 @@ void __i915_gpu_state_free(struct kref *error_ref)
 
 unwind:
 	while (dst->page_count--)
-		free_page((unsigned long)dst->pages[dst->page_count]);
+		free_page((unsigned long)dst->pages[dst->page_count].storage);
 	kfree(dst);
 	dst = NULL;
 
@@ -1104,7 +1112,7 @@ static void gen8_record_semaphore_state(struct i915_gpu_state *error,
 	signal_offset =
 		(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
-	tmp = error->semaphore->pages[0];
+	tmp = error->semaphore->pages[0].storage;
 	idx = gen8_engine_sync_index(engine, to);
 
 	ee->semaphore_mboxes[idx] = tmp[signal_offset];