Introduce a preferred dpagemap that can override the default. The
default is still the local tile VRAM dpagemap. The preferred dpagemap
is intended to be set from user-space.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c      | 18 +++++++++++++++++-
 drivers/gpu/drm/xe/xe_svm.h      | 22 ++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_vm.c       |  4 ++++
 drivers/gpu/drm/xe/xe_vm_types.h |  6 ++++++
 4 files changed, 49 insertions(+), 1 deletion(-)

--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -814,7 +814,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
return 0;
range_debug(range, "PAGE FAULT");
- dpagemap = xe_tile_local_pagemap(tile);
+ dpagemap = vma->svm.pref_dpagemap ?: xe_tile_local_pagemap(tile);
/* XXX: Add migration policy, for now migrate range once */
if (!range->skip_migrate && range->base.flags.migrate_devmem &&
@@ -1271,4 +1271,20 @@ xe_pagemap_find_or_create(struct xe_device *xe, struct xe_pagemap_cache *cache,
mutex_unlock(&cache->mutex);
return xpagemap;
}
+
#endif
+
+/**
+ * xe_svm_vma_fini() - Finalize the SVM part of a vma
+ * @svma: The struct xe_svm_vma to finalize
+ *
+ * Release the resources associated with the SVM
+ * metadata of a GPU vma.
+ */
+void xe_svm_vma_fini(struct xe_svm_vma *svma)
+{
+ if (svma->pref_dpagemap) {
+ drm_pagemap_put(svma->pref_dpagemap);
+ svma->pref_dpagemap = NULL;
+ }
+}
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -24,6 +24,16 @@ struct xe_tile;
struct xe_vm;
struct xe_vma;
+/**
+ * struct xe_svm_vma - VMA SVM metadata
+ * @pref_dpagemap: Reference-counted pointer to the drm_pagemap preferred
+ * for migration on an SVM page fault. The pointer is protected by the
+ * vm lock.
+ */
+struct xe_svm_vma {
+ struct drm_pagemap *pref_dpagemap;
+};
+
/** struct xe_svm_range - SVM range */
struct xe_svm_range {
/** @base: base drm_gpusvm_range */
@@ -124,10 +134,18 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
#define xe_svm_notifier_unlock(vm__) \
drm_gpusvm_notifier_unlock(&(vm__)->svm.gpusvm)
+void xe_svm_vma_fini(struct xe_svm_vma *svma);
+
struct xe_pagemap *
xe_pagemap_create(struct xe_device *xe, struct xe_pagemap_cache *cache,
struct xe_vram_region *vr);
+static inline void xe_svm_vma_assign_dpagemap(struct xe_svm_vma *svma,
+ struct drm_pagemap *dpagemap)
+{
+ svma->pref_dpagemap = dpagemap ? drm_pagemap_get(dpagemap) : NULL;
+}
+
#else
#include <linux/interval_tree.h>
@@ -213,6 +231,10 @@ static inline void xe_svm_notifier_unlock(struct xe_vm *vm)
{
}
+#define xe_svm_vma_fini(...) do {} while (0)
+
+#define xe_svm_vma_assign_dpagemap(...) do {} while (0)
+
#endif
#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1246,6 +1246,8 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
+ xe_svm_vma_fini(&vma->svm);
+
if (vma->ufence) {
xe_sync_ufence_put(vma->ufence);
vma->ufence = NULL;
@@ -2516,6 +2518,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
if (IS_ERR(vma))
return PTR_ERR(vma);
+ xe_svm_vma_assign_dpagemap(&vma->svm, old->svm.pref_dpagemap);
op->remap.prev = vma;
/*
@@ -2547,6 +2550,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return PTR_ERR(vma);
op->remap.next = vma;
+ xe_svm_vma_assign_dpagemap(&vma->svm, old->svm.pref_dpagemap);
/*
* Userptr creates a new SG mapping so
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -17,6 +17,7 @@
#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"
+#include "xe_svm.h"
struct xe_bo;
struct xe_svm_range;
@@ -128,6 +129,11 @@ struct xe_vma {
* Needs to be signalled before UNMAP can be processed.
*/
struct xe_user_fence *ufence;
+
+#if IS_ENABLED(CONFIG_DRM_XE_GPUSVM)
+ /** @svm: SVM metadata attached to the vma. */
+ struct xe_svm_vma svm;
+#endif
};
/**
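
Two illustrative notes follow. First, the selection rule in
xe_svm_handle_pagefault(): the per-vma preference, when set, wins over
the tile-local default. Below is a minimal stand-alone sketch of that
rule; all types and helpers in it (pagemap, local_pagemap(),
select_pagemap()) are made-up stand-ins, not the driver's, and "?:" is
the GNU C conditional with omitted middle operand that the kernel
permits.

#include <stdio.h>

struct pagemap { const char *name; };

static struct pagemap tile_local = { "tile-local VRAM" };
static struct pagemap preferred  = { "user-preferred" };

/* Stand-in for xe_tile_local_pagemap(): the default placement. */
static struct pagemap *local_pagemap(void)
{
        return &tile_local;
}

/*
 * The rule from the fault handler: use the per-vma preference when
 * set, otherwise fall back to the tile-local pagemap.
 */
static struct pagemap *select_pagemap(struct pagemap *pref)
{
        return pref ?: local_pagemap();
}

int main(void)
{
        printf("no preference:  %s\n", select_pagemap(NULL)->name);
        printf("preference set: %s\n", select_pagemap(&preferred)->name);
        return 0;
}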
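
Second, the reference ownership: xe_svm_vma_assign_dpagemap() takes one
reference per vma holding the preference, xe_svm_vma_fini() drops it
from xe_vma_destroy_late(), and a remap hands the preference on to both
the prev and next vmas. The sketch below models that pairing in user
space; pagemap_get()/pagemap_put() and struct svm_vma are stand-ins
with kref-like semantics, not the drm_pagemap API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct pagemap {
        int refcount;
};

static struct pagemap *pagemap_get(struct pagemap *p)
{
        p->refcount++;
        return p;
}

static void pagemap_put(struct pagemap *p)
{
        assert(p->refcount > 0);
        p->refcount--;
}

struct svm_vma {
        struct pagemap *pref_pagemap;
};

/* Like xe_svm_vma_assign_dpagemap(): the vma owns one reference. */
static void svm_vma_assign(struct svm_vma *svma, struct pagemap *p)
{
        svma->pref_pagemap = p ? pagemap_get(p) : NULL;
}

/* Like xe_svm_vma_fini(), called from vma teardown. */
static void svm_vma_fini(struct svm_vma *svma)
{
        if (svma->pref_pagemap) {
                pagemap_put(svma->pref_pagemap);
                svma->pref_pagemap = NULL;
        }
}

int main(void)
{
        struct pagemap pm = { .refcount = 1 };  /* creator's reference */
        struct svm_vma old_vma = { 0 }, prev = { 0 }, next = { 0 };

        svm_vma_assign(&old_vma, &pm);
        /* A remap splits old_vma; both halves inherit the preference. */
        svm_vma_assign(&prev, old_vma.pref_pagemap);
        svm_vma_assign(&next, old_vma.pref_pagemap);
        assert(pm.refcount == 4);

        svm_vma_fini(&old_vma);
        svm_vma_fini(&prev);
        svm_vma_fini(&next);
        assert(pm.refcount == 1);
        printf("refcount balanced: %d\n", pm.refcount);
        return 0;
}

Keeping one reference per vma means a split or a teardown never has to
reason about other vmas that happen to share the same preference.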
Introduce a preferred dpagemap, that can override the default. The default is still the local tile vram dpagemap. The preferred pagemap is intended to be set from user-space. Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> --- drivers/gpu/drm/xe/xe_svm.c | 18 +++++++++++++++++- drivers/gpu/drm/xe/xe_svm.h | 22 ++++++++++++++++++++++ drivers/gpu/drm/xe/xe_vm.c | 4 ++++ drivers/gpu/drm/xe/xe_vm_types.h | 6 ++++++ 4 files changed, 49 insertions(+), 1 deletion(-)