
[16/22] drm/xe/svm: Implement the mmu notifier range invalidate callback

Message ID 20231221043812.3783313-17-oak.zeng@intel.com (mailing list archive)
State New, archived
Series: XeKmd basic SVM support

Commit Message

Zeng, Oak Dec. 21, 2023, 4:38 a.m. UTC
To mirror the CPU page table on the GPU side, we register an mmu interval
notifier (in a coming patch of this series). Core mm calls back into the
GPU driver whenever there is a change to a monitored virtual address range,
e.g., when the range is released or unmapped by the user.

This patch implements the GPU driver callback function for such an mmu
interval notifier. In the callback we unbind the address range from the
GPU if it has been unmapped on the CPU side, thus mirroring the CPU page
table change.

We also unregister the mmu interval notifier from core mm on a munmap
event. However, we cannot unregister the notifier directly from the mmu
notifier range invalidation callback: during a munmap (see the kernel
function vm_munmap) the mmap_write_lock is held, while unregistering the
notifier (via mmu_interval_notifier_remove) also requires the
mmap_write_lock of the current process.

Thus, we queue a kernel worker to unregister the mmu interval notifier on
an MMU_NOTIFY_UNMAP event.
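
A hedged sketch of what such a deferred unregister could look like is
below; the actual worker is set up elsewhere in this series, so the
handler name and the INIT_WORK placement are assumptions (the
unregister_notifier_work and notifier fields do exist in struct
xe_svm_range):

#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include "xe_svm.h"

/*
 * Hypothetical handler name: deferred unregistering of the mmu interval
 * notifier. The worker runs outside the invalidate callback and outside
 * the munmap path that holds mmap_write_lock, which avoids the locking
 * problem described above.
 */
static void xe_svm_unregister_notifier_work_func(struct work_struct *w)
{
	struct xe_svm_range *svm_range =
		container_of(w, struct xe_svm_range, unregister_notifier_work);

	mmu_interval_notifier_remove(&svm_range->notifier);
}

The work item would be initialized once when the range is created, e.g.
INIT_WORK(&svm_range->unregister_notifier_work,
	  xe_svm_unregister_notifier_work_func);
and then queued from the invalidate callback as the patch below does.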

Signed-off-by: Oak Zeng <oak.zeng@intel.com>
Co-developed-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@intel.com>
Cc: Brian Welty <brian.welty@intel.com>
---
 drivers/gpu/drm/xe/xe_svm.c       |  1 +
 drivers/gpu/drm/xe/xe_svm.h       |  1 -
 drivers/gpu/drm/xe/xe_svm_range.c | 37 ++++++++++++++++++++++++++++++-
 3 files changed, 37 insertions(+), 2 deletions(-)

Patch

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index ab3cc2121869..6393251c0051 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -8,6 +8,7 @@ 
 #include "xe_svm.h"
 #include <linux/hmm.h>
 #include <linux/scatterlist.h>
+#include "xe_pt.h"
 
 DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
 
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 90e665f2bfc6..0038f98c0cc7 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -54,7 +54,6 @@  struct xe_svm {
 struct xe_svm_range {
 	/** @svm: pointer of the xe_svm that this range belongs to */
 	struct xe_svm *svm;
-
 	/** @notifier: The mmu interval notifier used to keep track of CPU
 	 * side address range change. Driver will get a callback with this
 	 * notifier if anything changed from CPU side, such as range is
diff --git a/drivers/gpu/drm/xe/xe_svm_range.c b/drivers/gpu/drm/xe/xe_svm_range.c
index 286d5f7d6ecd..53dd3be7ab9f 100644
--- a/drivers/gpu/drm/xe/xe_svm_range.c
+++ b/drivers/gpu/drm/xe/xe_svm_range.c
@@ -10,6 +10,7 @@ 
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include "xe_svm.h"
+#include "xe_pt.h"
 
 /**
  * xe_svm_range_from_addr() - retrieve svm_range contains a virtual address
@@ -59,8 +60,42 @@  bool xe_svm_range_belongs_to_vma(struct mm_struct *mm,
 	return (vma1 == vma) && (vma2 == vma);
 }
 
+static bool xe_svm_range_invalidate(struct mmu_interval_notifier *mni,
+				      const struct mmu_notifier_range *range,
+				      unsigned long cur_seq)
+{
+	struct xe_svm_range *svm_range =
+		container_of(mni, struct xe_svm_range, notifier);
+	struct xe_svm *svm = svm_range->svm;
+	unsigned long length = range->end - range->start;
+
+	/*
+	 * MMU_NOTIFY_RELEASE is sent upon process exit to ask the driver
+	 * to release any process resources, such as zapping the GPU page
+	 * table mapping or unregistering the mmu notifier. We already
+	 * clear the GPU page table and unregister the mmu notifier in
+	 * xe_destroy_svm upon process exit, so simply return here.
+	 */
+	if (range->event == MMU_NOTIFY_RELEASE)
+		return true;
+
+	if (mmu_notifier_range_blockable(range))
+		mutex_lock(&svm->mutex);
+	else if (!mutex_trylock(&svm->mutex))
+		return false;
+
+	mmu_interval_set_seq(mni, cur_seq);
+	xe_invalidate_svm_range(svm->vm, range->start, length);
+	mutex_unlock(&svm->mutex);
+
+	if (range->event == MMU_NOTIFY_UNMAP)
+		queue_work(system_unbound_wq, &svm_range->unregister_notifier_work);
+
+	return true;
+}
+
 static const struct mmu_interval_notifier_ops xe_svm_mni_ops = {
-	.invalidate = NULL,
+	.invalidate = xe_svm_range_invalidate,
 };
 
 /**
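
For context, here is a hedged sketch of how a range's interval notifier
might be registered against xe_svm_mni_ops; the actual registration lands
in a coming patch of this series, so the helper below and its name are
assumptions:

#include <linux/mmu_notifier.h>
#include "xe_svm.h"

/* Hypothetical helper: attach the per-range interval notifier to the CPU mm. */
static int xe_svm_range_register_notifier(struct xe_svm_range *svm_range,
					  struct mm_struct *mm,
					  unsigned long start,
					  unsigned long length)
{
	return mmu_interval_notifier_insert(&svm_range->notifier, mm,
					    start, length, &xe_svm_mni_ops);
}

Once registered, any CPU-side change to [start, start + length) invokes
xe_svm_range_invalidate() shown above.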