
[RFC,6/6] nouveau: Add SVM support for migrating file-backed pages to the GPU

Message ID e935f82d29f8289862f611c15d28c4504aca2a82.1742099301.git-series.apopple@nvidia.com
State New
Series Allow file-backed or shared device private pages

Commit Message

Alistair Popple March 16, 2025, 4:29 a.m. UTC
Currently SVM for Nouveau only allows private anonymous memory to be
migrated to the GPU. Add support for migrating file-backed pages by
implementing the new migrate_to_pagecache() callback, which copies the
contents of a device-private page back to its pagecache page as
required.

Signed-off-by: Alistair Popple <apopple@nvidia.com>
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
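
A note for orientation: the migrate_to_pagecache() callback implemented
here is presumably added to struct dev_pagemap_ops by an earlier patch
in this series (not shown on this page). Its shape can be inferred from
its use in this patch; a sketch only, not the series' exact definition:

	/*
	 * Sketch, inferred from this patch; the authoritative definition
	 * lives in the preceding patches of the series. Called when the
	 * contents of a file-backed device-private page must be copied
	 * back to its pagecache page, e.g. so the core MM can write it
	 * out or reclaim it. Returns 0 on success or a negative errno.
	 */
	int (*migrate_to_pagecache)(struct page *page, struct page *newpage);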

Patch

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1a07256..f9a5103 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -218,9 +218,33 @@  static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 	return ret;
 }
 
+static int nouveau_dmem_migrate_to_pagecache(struct page *page,
+						struct page *newpage)
+{
+	struct nouveau_drm *drm = page_to_drm(page);
+	struct nouveau_dmem *dmem = drm->dmem;
+	dma_addr_t dma_addr = 0;
+	struct nouveau_svmm *svmm;
+	struct nouveau_fence *fence;
+
+	set_page_dirty(newpage);
+	svmm = page->zone_device_data;
+	mutex_lock(&svmm->mutex);
+
+	/* TODO: Error handling */
+	WARN_ON_ONCE(nouveau_dmem_copy_one(drm, page, newpage, &dma_addr));
+	mutex_unlock(&svmm->mutex);
+	nouveau_fence_new(&fence, dmem->migrate.chan);
+	nouveau_dmem_fence_done(&fence);
+	dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+	return 0;
+}
+
 static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
 	.page_free		= nouveau_dmem_page_free,
 	.migrate_to_ram		= nouveau_dmem_migrate_to_ram,
+	.migrate_to_pagecache   = nouveau_dmem_migrate_to_pagecache,
 };
 
 static int
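
The TODO in the patch leaves the failure path of nouveau_dmem_copy_one()
unhandled. One possible shape for the error handling, a sketch only,
assuming nouveau_dmem_copy_one() returns 0 or a negative errno as its
use under WARN_ON_ONCE() suggests:

	static int nouveau_dmem_migrate_to_pagecache(struct page *page,
						     struct page *newpage)
	{
		struct nouveau_drm *drm = page_to_drm(page);
		struct nouveau_dmem *dmem = drm->dmem;
		dma_addr_t dma_addr = 0;
		struct nouveau_svmm *svmm = page->zone_device_data;
		struct nouveau_fence *fence;
		int ret;

		mutex_lock(&svmm->mutex);
		ret = nouveau_dmem_copy_one(drm, page, newpage, &dma_addr);
		mutex_unlock(&svmm->mutex);
		if (ret)
			return ret;

		/* Only dirty the pagecache page once the copy has succeeded. */
		set_page_dirty(newpage);

		nouveau_fence_new(&fence, dmem->migrate.chan);
		nouveau_dmem_fence_done(&fence);
		dma_unmap_page(drm->dev->dev, dma_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		return 0;
	}

Whether dma_addr needs unmapping on the failure path depends on where
nouveau_dmem_copy_one() fails internally; the sketch assumes it undoes
its own DMA mapping on error.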