[17/22] drm/xe/svm: clean up svm range during process exit

Message ID 20231221043812.3783313-18-oak.zeng@intel.com (mailing list archive)
State New, archived
Series XeKmd basic SVM support

Commit Message

Zeng, Oak Dec. 21, 2023, 4:38 a.m. UTC
Clean up svm ranges during process exit: zap the GPU page tables
of the svm process, unregister all previously registered mmu
interval notifiers, and free the svm ranges and the svm data
structure.

Signed-off-by: Oak Zeng <oak.zeng@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@intel.com>
Cc: Brian Welty <brian.welty@intel.com>
---
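Note: the teardown in xe_destroy_svm() below collects every range into a
temporary array during the interval tree walk and only frees the ranges
afterwards, because freeing a range also frees its embedded
interval_tree_node. A minimal, self-contained userspace sketch of that
collect-then-free pattern is included here for illustration only; the
types and the linked list in it are hypothetical stand-ins, not the Xe
data structures.

/* collect-then-free demo: plain C, hypothetical types, not Xe code */
#include <assert.h>
#include <stdlib.h>

#define MAX_RANGES 1024

struct demo_range {
	struct demo_range *next;	/* stands in for the tree linkage */
};

static void destroy_all(struct demo_range *head)
{
	struct demo_range *collected[MAX_RANGES];
	struct demo_range *r;
	int i = 0;

	/* pass 1: walk the structure, only collecting pointers */
	for (r = head; r; r = r->next) {
		assert(i < MAX_RANGES);
		collected[i++] = r;
	}

	/*
	 * pass 2: free outside the walk; freeing r inside the loop above
	 * would make the "r = r->next" step read freed memory
	 */
	while (i-- > 0)
		free(collected[i]);
}

int main(void)
{
	struct demo_range *head = NULL;
	int n;

	for (n = 0; n < 8; n++) {
		struct demo_range *r = calloc(1, sizeof(*r));

		if (!r)
			break;
		r->next = head;
		head = r;
	}
	destroy_all(head);
	return 0;
}
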
 drivers/gpu/drm/xe/xe_svm.c       | 24 ++++++++++++++++++++++++
 drivers/gpu/drm/xe/xe_svm.h       |  1 +
 drivers/gpu/drm/xe/xe_svm_range.c | 17 +++++++++++++++++
 3 files changed, 42 insertions(+)

Patch

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 6393251c0051..5772bfcf7da4 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -9,6 +9,8 @@ 
 #include <linux/hmm.h>
 #include <linux/scatterlist.h>
 #include "xe_pt.h"
+#include "xe_assert.h"
+#include "xe_vm_types.h"
 
 DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
 
@@ -19,9 +21,31 @@  DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
  */
 void xe_destroy_svm(struct xe_svm *svm)
 {
+#define MAX_SVM_RANGE (1024*1024)
+	struct xe_svm_range **range_array;
+	struct interval_tree_node *node;
+	struct xe_svm_range *range;
+	int i = 0;
+
+	range_array = kvcalloc(MAX_SVM_RANGE, sizeof(struct xe_svm_range *),
+			       GFP_KERNEL);
+	node = interval_tree_iter_first(&svm->range_tree, 0, ~0ULL);
+	while (node) {
+		range = container_of(node, struct xe_svm_range, inode);
+		xe_svm_range_prepare_destroy(range);
+		node = interval_tree_iter_next(node, 0, ~0ULL);
+		xe_assert(svm->vm->xe, i < MAX_SVM_RANGE);
+		range_array[i++] = range;
+	}
+
+	/* Freeing a range (and thus range->inode) during the walk above is not safe */
+	for (i--; i >= 0; i--)
+		kfree(range_array[i]);
+
 	hash_del_rcu(&svm->hnode);
 	mutex_destroy(&svm->mutex);
 	kfree(svm);
+	kvfree(range_array);
 }
 
 /**
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 0038f98c0cc7..5b3bd2c064f5 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -90,6 +90,7 @@  bool xe_svm_range_belongs_to_vma(struct mm_struct *mm,
 								struct vm_area_struct *vma);
 void xe_svm_range_unregister_mmu_notifier(struct xe_svm_range *range);
 int xe_svm_range_register_mmu_notifier(struct xe_svm_range *range);
+void xe_svm_range_prepare_destroy(struct xe_svm_range *range);
 
 int xe_svm_build_sg(struct hmm_range *range, struct sg_table *st);
 int xe_svm_devm_add(struct xe_tile *tile, struct xe_mem_region *mem);
diff --git a/drivers/gpu/drm/xe/xe_svm_range.c b/drivers/gpu/drm/xe/xe_svm_range.c
index 53dd3be7ab9f..dfb4660dc26f 100644
--- a/drivers/gpu/drm/xe/xe_svm_range.c
+++ b/drivers/gpu/drm/xe/xe_svm_range.c
@@ -165,3 +165,20 @@  int xe_svm_range_register_mmu_notifier(struct xe_svm_range *range)
 	range->mmu_notifier_registered = true;
 	return ret;
 }
+
+/**
+ * xe_svm_range_prepare_destroy() - prepare a svm range for destruction
+ *
+ * @range: the svm range to destroy
+ *
+ * Prepare a svm range for destruction: zap this range from the GPU page
+ * table and unregister its mmu interval notifier.
+ */
+void xe_svm_range_prepare_destroy(struct xe_svm_range *range)
+{
+	struct xe_vm *vm = range->svm->vm;
+	unsigned long length = range->end - range->start;
+
+	xe_invalidate_svm_range(vm, range->start, length);
+	xe_svm_range_unregister_mmu_notifier(range);
+}
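
For comparison, a hedged alternative sketch of the same teardown that
removes each node from the interval tree before freeing it, which avoids
the temporary MAX_SVM_RANGE array (and its unchecked, very large
allocation) altogether. The helper name xe_destroy_svm_ranges() is
hypothetical, interval_tree_remove() is the generic helper declared in
<linux/interval_tree.h>, and the sketch assumes nothing else touches
svm->range_tree at this point.

/*
 * Alternative sketch only, not part of this patch: restart the lookup
 * from the tree root after every free so no iterator state outlives
 * the kfree().
 */
static void xe_destroy_svm_ranges(struct xe_svm *svm)
{
	struct interval_tree_node *node;
	struct xe_svm_range *range;

	while ((node = interval_tree_iter_first(&svm->range_tree, 0, ~0ULL))) {
		range = container_of(node, struct xe_svm_range, inode);
		xe_svm_range_prepare_destroy(range);
		interval_tree_remove(node, &svm->range_tree);
		kfree(range);
	}
}

With that shape, xe_destroy_svm() would only need to call the helper once
before hash_del_rcu(), mutex_destroy() and kfree(svm).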