@@ -5,12 +5,19 @@
#include <linux/mutex.h>
#include <linux/mm_types.h>
+#include <linux/interval_tree.h>
+#include <linux/container_of.h>
+#include <linux/types.h>
+#include <linux/migrate.h>
#include "xe_svm.h"
#include <linux/hmm.h>
#include <linux/scatterlist.h>
#include "xe_pt.h"
#include "xe_assert.h"
#include "xe_vm_types.h"
+#include "xe_gt.h"
+#include "xe_migrate.h"
+#include "xe_trace.h"
DEFINE_HASHTABLE(xe_svm_table, XE_MAX_SVM_PROCESS);
@@ -80,6 +80,9 @@ struct xe_svm_range {
};
vm_fault_t xe_devm_migrate_to_ram(struct vm_fault *vmf);
+int svm_migrate_range_to_vram(struct xe_svm_range *range,
+ struct vm_area_struct *vma,
+ struct xe_tile *tile);
void xe_destroy_svm(struct xe_svm *svm);
struct xe_svm *xe_create_svm(struct xe_vm *vm);
struct xe_svm *xe_lookup_svm_by_mm(struct mm_struct *mm);
@@ -229,3 +229,117 @@ vm_fault_t xe_devm_migrate_to_ram(struct vm_fault *vmf)
kvfree(buf);
return 0;
}
+
+
+/**
+ * svm_migrate_range_to_vram() - migrate backing store of a va range to vram
+ * @range: the va range to migrate. The range should only belong to one vma.
+ * @vma: the vma that this range belongs to. @range can cover the whole @vma
+ * or a sub-range of @vma.
+ * @tile: the destination tile which holds the new backing store of the range
+ *
+ * Must be called with mmap_read_lock(mm) held.
+ *
+ * Return: negative errno on failure, 0 on success
+ */
+int svm_migrate_range_to_vram(struct xe_svm_range *range,
+ struct vm_area_struct *vma,
+ struct xe_tile *tile)
+{
+ struct mm_struct *mm = range->svm->mm;
+ unsigned long start = range->start;
+ unsigned long end = range->end;
+ unsigned long npages = (end - start) >> PAGE_SHIFT;
+ struct xe_mem_region *mr = &tile->mem.vram;
+ struct migrate_vma migrate = {
+ .vma = vma,
+ .start = start,
+ .end = end,
+ .pgmap_owner = tile->xe->drm.dev,
+ .flags = MIGRATE_VMA_SELECT_SYSTEM,
+ };
+ struct device *dev = tile->xe->drm.dev;
+ dma_addr_t *src_dma_addr;
+ struct dma_fence *fence;
+ struct page *src_page;
+ LIST_HEAD(blocks);
+ int ret = 0, i;
+ u64 dst_dpa;
+ void *buf;
+
+ mmap_assert_locked(mm);
+ xe_assert(tile->xe, xe_svm_range_belongs_to_vma(mm, range, vma));
+
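+ /*
+ * One allocation backs three arrays of npages entries each: the
+ * source migrate pfns, the destination pfns and the dma addresses
+ * of the mapped source pages.
+ */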
+ buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*src_dma_addr),
+ GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ migrate.src = buf;
+ migrate.dst = migrate.src + npages;
+ src_dma_addr = (dma_addr_t *) (migrate.dst + npages);
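+ /*
+ * Allocate the vram backing pages for the whole range up front; the
+ * destination pfns land in migrate.dst and the underlying allocations
+ * are tracked in @blocks so they can be torn down on error.
+ */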
+ ret = xe_devm_alloc_pages(tile, npages, &blocks, migrate.dst);
+ if (ret)
+ goto kfree_buf;
+
+ ret = migrate_vma_setup(&migrate);
+ if (ret) {
+ drm_err(&tile->xe->drm, "migrate_vma_setup() returned %d for range [%lx - %lx]\n",
+ ret, start, end);
+ goto free_dst_pages;
+ }
+
+ trace_xe_svm_migrate_sram_to_vram(range);
+ /*
+ * FIXME: partial migration of a range. Print a warning for now.
+ * If this message shows up, we need to fall back to page by page
+ * migration: only migrate pages with MIGRATE_PFN_MIGRATE set.
+ */
+ if (migrate.cpages != npages)
+ drm_warn(&tile->xe->drm, "Partial migration for range [%lx - %lx]: range is %lu pages, collected only %lu pages\n",
+ start, end, npages, migrate.cpages);
+
+ /*
+ * Migrate page by page for now. Since both the source and the
+ * destination pages can be physically non-contiguous, there is no
+ * good way to migrate multiple pages per blitter command.
+ */
+ for (i = 0; i < npages; i++) {
+ src_page = migrate_pfn_to_page(migrate.src[i]);
+ if (unlikely(!src_page || !(migrate.src[i] & MIGRATE_PFN_MIGRATE)))
+ goto free_dst_page;
+
+ xe_assert(tile->xe, !is_zone_device_page(src_page));
+ src_dma_addr[i] = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, src_dma_addr[i]))) {
+ drm_warn(&tile->xe->drm, "dma map error for host pfn %lx\n", migrate.src[i]);
+ goto free_dst_page;
+ }
+ dst_dpa = vram_pfn_to_dpa(mr, migrate.dst[i]);
+ fence = xe_migrate_svm(tile->migrate, src_dma_addr[i], false,
+ dst_dpa, true, PAGE_SIZE);
+ if (IS_ERR(fence)) {
+ drm_warn(&tile->xe->drm, "migrate host page (pfn: %lx) to vram failed\n",
+ migrate.src[i]);
+ /* Migration is best effort. Even if we fail here, we continue. */
+ goto free_dst_page;
+ }
+ /*
+ * FIXME: Use the first migration's out-fence as the second
+ * migration's in-fence, and so on, and only wait for the
+ * out-fence of the last migration?
+ */
+ dma_fence_wait(fence, false);
+ dma_fence_put(fence);
+ /* Copy succeeded; keep the vram page so migrate_vma_pages() can install it. */
+ continue;
+free_dst_page:
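+ /*
+ * This page is not being migrated (missing source page, page not
+ * marked for migration, dma-map or blit failure): release the vram
+ * page allocated for this slot.
+ */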
+ xe_devm_page_free(pfn_to_page(migrate.dst[i]));
+ }
+
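+ /* All copies are done; unmap the source pages we mapped for the blitter. */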
+ for (i = 0; i < npages; i++)
+ if (src_dma_addr[i] && !dma_mapping_error(dev, src_dma_addr[i]))
+ dma_unmap_page(dev, src_dma_addr[i], PAGE_SIZE, DMA_TO_DEVICE);
+
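+ /*
+ * Install the vram pages in place of the system pages and let the
+ * core mm finish (or roll back) the migration.
+ */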
+ migrate_vma_pages(&migrate);
+ migrate_vma_finalize(&migrate);
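+ /*
+ * Only tear down the pre-allocated vram blocks when we bailed out
+ * before any page could be consumed; otherwise the pages were either
+ * installed above or freed individually in the copy loop.
+ */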
+free_dst_pages:
+ if (ret)
+ xe_devm_free_blocks(&blocks);
+kfree_buf:
+ kvfree(buf);
+ return ret;
+}