@@ -335,6 +335,22 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
return full ? -ENOSPC : 0;
}
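+/*
+ * Record a failed pagefault against the VM it occurred on, for later
+ * reporting to userspace.
+ */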
+static void save_pagefault_to_vm(struct xe_device *xe, struct xe_pagefault *pf)
+{
+ struct xe_vm *vm;
+ struct xe_pagefault *store;
+
+ vm = asid_to_vm(xe, pf->asid);
+ if (IS_ERR(vm))
+ return;
+
+ store = kzalloc(sizeof(*pf), GFP_KERNEL);
+ if (store) {
+ memcpy(store, pf, sizeof(*pf));
+ xe_vm_add_pf_entry(vm, store);
+ }
+ xe_vm_put(vm);
+}
+
#define USM_QUEUE_MAX_RUNTIME_MS 20
static void pf_queue_work_func(struct work_struct *w)
@@ -353,6 +369,7 @@ static void pf_queue_work_func(struct work_struct *w)
ret = handle_pagefault(gt, &pf);
if (unlikely(ret)) {
print_pagefault(xe, &pf);
pf.fault_unsuccessful = 1;
+ save_pagefault_to_vm(xe, &pf);
drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
}
@@ -746,6 +746,46 @@ int xe_vm_userptr_check_repin(struct xe_vm *vm)
list_empty_careful(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}
+static void free_pf_entry(struct xe_vm *vm, struct xe_vm_pf_entry *e)
+{
+ lockdep_assert_held(&vm->pfs.lock);
+
+ list_del(&e->list);
+ kfree(e->pf);
+ kfree(e);
+ vm->pfs.len--;
+}
+
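+/**
+ * xe_vm_add_pf_entry() - Save a pagefault against a VM
+ * @vm: the &struct xe_vm the fault was observed on
+ * @pf: the pagefault to save; ownership is transferred to the VM
+ *
+ * Appends @pf to the VM's pagefault list, dropping the oldest entry once
+ * the list grows beyond MAX_PFS entries.
+ */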
+void xe_vm_add_pf_entry(struct xe_vm *vm, struct xe_pagefault *pf)
+{
+ struct xe_vm_pf_entry *e;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e) {
+ kfree(pf);
+ return;
+ }
+
+ e->pf = pf;
+
+ spin_lock(&vm->pfs.lock);
+ list_add_tail(&e->list, &vm->pfs.list);
+ vm->pfs.len++;
+ /* Cap the number of saved pagefaults to prevent memory overuse */
+ if (vm->pfs.len > MAX_PFS) {
+ struct xe_vm_pf_entry *rem =
+ list_first_entry(&vm->pfs.list, struct xe_vm_pf_entry, list);
+
+ free_pf_entry(vm, rem);
+ }
+ spin_unlock(&vm->pfs.lock);
+}
+
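+/**
+ * xe_vm_remove_pf_entries() - Free all pagefaults saved against a VM
+ * @vm: the &struct xe_vm being torn down
+ */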
+void xe_vm_remove_pf_entries(struct xe_vm *vm)
+{
+ struct xe_vm_pf_entry *e, *tmp;
+
+ spin_lock(&vm->pfs.lock);
+ list_for_each_entry_safe(e, tmp, &vm->pfs.list, list)
+ free_pf_entry(vm, e);
+ spin_unlock(&vm->pfs.lock);
+}
+
static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
{
int i;
@@ -1448,6 +1488,9 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
init_rwsem(&vm->userptr.notifier_lock);
spin_lock_init(&vm->userptr.invalidated_lock);
+ INIT_LIST_HEAD(&vm->pfs.list);
+ spin_lock_init(&vm->pfs.lock);
+
ttm_lru_bulk_move_init(&vm->lru_bulk_move);
INIT_WORK(&vm->destroy_work, vm_destroy_work_func);
@@ -1672,6 +1715,8 @@ void xe_vm_close_and_put(struct xe_vm *vm)
}
up_write(&xe->usm.lock);
+ xe_vm_remove_pf_entries(vm);
+
for_each_tile(tile, xe, id)
xe_range_fence_tree_fini(&vm->rftree[id]);
@@ -12,6 +12,8 @@
#include "xe_map.h"
#include "xe_vm_types.h"
+#define MAX_PFS 50
+
struct drm_device;
struct drm_printer;
struct drm_file;
@@ -244,6 +246,10 @@ int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);
+void xe_vm_add_pf_entry(struct xe_vm *vm, struct xe_pagefault *pf);
+
+void xe_vm_remove_pf_entries(struct xe_vm *vm);
+
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);
int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
@@ -18,6 +18,7 @@
#include "xe_range_fence.h"
struct xe_bo;
+struct xe_pagefault;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;
@@ -135,6 +136,13 @@ struct xe_userptr_vma {
struct xe_device;
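+/**
+ * struct xe_vm_pf_entry - Entry for a pagefault saved against a VM
+ */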
+struct xe_vm_pf_entry {
+ /** @pf: observed pagefault */
+ struct xe_pagefault *pf;
+ /** @list: link into &xe_vm.pfs.list */
+ struct list_head list;
+};
+
struct xe_vm {
/** @gpuvm: base GPUVM used to track VMAs */
struct drm_gpuvm gpuvm;
@@ -274,6 +282,18 @@ struct xe_vm {
bool capture_once;
} error_capture;
+ /**
+ * @pfs: List of failed pagefaults observed on this VM
+ */
+ struct {
+ /** @pfs.lock: lock protecting @pfs.list */
+ spinlock_t lock;
+ /** @pfs.list: list of xe_vm_pf_entry entries */
+ struct list_head list;
+ /** @pfs.len: length of @pfs.list */
+ unsigned int len;
+ } pfs;
+
/**
* @tlb_flush_seqno: Required TLB flush seqno for the next exec.
* protected by the vm resv.
*/

Add additional information to each VM so they can report up to the last
50 seen pagefaults. Only failed pagefaults are saved this way, as
successful pagefaults should recover on their own and do not need to be
reported to userspace.

Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Suggested-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_gt_pagefault.c | 17 +++++++++++
 drivers/gpu/drm/xe/xe_vm.c           | 45 ++++++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_vm.h           |  6 ++++
 drivers/gpu/drm/xe/xe_vm_types.h     | 20 +++++++++++++
 4 files changed, 88 insertions(+)
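
As a possible follow-up, the saved entries could be surfaced to
userspace via a printer-based helper. A minimal sketch, assuming a
hypothetical xe_vm_print_pf_entries() helper driven by a drm_printer;
the helper name and the xe_pagefault fields referenced here (asid,
page_addr) are illustrative assumptions, not part of this patch:

/*
 * Hypothetical sketch: walk the pagefaults saved against a VM under
 * vm->pfs.lock and emit them through a drm_printer, e.g. from a
 * debugfs or devcoredump path. Only failed faults ever reach the
 * list, so no filtering is needed here.
 */
static void xe_vm_print_pf_entries(struct xe_vm *vm, struct drm_printer *p)
{
	struct xe_vm_pf_entry *e;

	spin_lock(&vm->pfs.lock);
	list_for_each_entry(e, &vm->pfs.list, list) {
		struct xe_pagefault *pf = e->pf;

		drm_printf(p, "ASID: %u, faulted address: 0x%016llx\n",
			   pf->asid, pf->page_addr);
	}
	spin_unlock(&vm->pfs.lock);
}

A real implementation would likely snapshot the entries and print
outside the lock, since printer backends may not be safe to call in
atomic context.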