@@ -145,3 +145,64 @@ int xe_svm_build_sg(struct hmm_range *range,
sg_mark_end(sg);
return 0;
}
+
+/**
+ * svm_populate_range() - Populate physical pages of a virtual address range
+ *
+ * @svm_range: The svm range to populate
+ * @hmm_range: pointer to hmm_range struct. hmm_range->hmm_pfns
+ * will hold the populated pfns.
+ * @write: populate pages with write permission
+ *
+ * This function also reads the mmu notifier sequence #
+ * (mmu_interval_read_begin) for the purpose of later comparison
+ * (through mmu_interval_read_retry). Must be called with the
+ * mmap read or write lock held.
+ * This function allocates hmm_range->hmm_pfns; it is the caller's
+ * responsibility to free it.
+ * Returns: 0 for success; negative error code on failure
+ */
+static int svm_populate_range(struct xe_svm_range *svm_range,
+			      struct hmm_range *hmm_range, bool write)
+{
+	unsigned long timeout =
+		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+	unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+	u64 npages;
+	int ret;
+
+	mmap_assert_locked(svm_range->svm->mm);
+
+	npages = ((svm_range->end - 1) >> PAGE_SHIFT) -
+		(svm_range->start >> PAGE_SHIFT) + 1;
+	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+	if (unlikely(!pfns))
+		return -ENOMEM;
+
+	if (write)
+		flags |= HMM_PFN_REQ_WRITE;
+
+	memset64((u64 *)pfns, (u64)flags, npages);
+	hmm_range->hmm_pfns = pfns;
+	hmm_range->notifier = &svm_range->notifier;
+	hmm_range->start = svm_range->start;
+	hmm_range->end = svm_range->end;
+	hmm_range->pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
+	hmm_range->dev_private_owner = svm_range->svm->vm->xe->drm.dev;
+
+	while (true) {
+		/* -EBUSY means the notifier seq went stale; must re-read it */
+		hmm_range->notifier_seq = mmu_interval_read_begin(&svm_range->notifier);
+		ret = hmm_range_fault(hmm_range);
+		if (ret != -EBUSY)
+			break;
+		if (time_after(jiffies, timeout)) {
+			ret = -ETIME;
+			break;
+		}
+	}
+
+	if (ret)
+		kvfree(pfns);
+	return ret;
+}