Add runtime PM since we might call populate_mm on a foreign device.
Also create the VRAM bos as ttm_bo_type_kernel. This avoids the
initial clearing and the creation of an mmap handle.

Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
---
 drivers/gpu/drm/drm_pagemap.c |  4 +-
 drivers/gpu/drm/xe/xe_svm.c   | 80 ++++++++++++++++++-----------------
 drivers/gpu/drm/xe/xe_tile.h  | 11 +++++
 3 files changed, 55 insertions(+), 40 deletions(-)

--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -276,7 +276,8 @@ npages_in_range(unsigned long start, unsigned long end)
* The caller should hold a reference to the device memory allocation,
* and the reference is consumed by this function unless it returns with
* an error.
- * @mm: Pointer to the struct mm_struct.
+ * @mm: Pointer to the struct mm_struct. The caller is expected to hold a
+ * reference to the mm, and the mm should be locked on entry.
* @start: Start of the virtual address range to migrate.
* @end: End of the virtual address range to migrate.
* @pgmap_owner: Not used currently, since only system memory is considered.
@@ -814,3 +815,4 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
return err;
}
+EXPORT_SYMBOL(drm_pagemap_populate_mm);
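For illustration, a minimal caller sketch honoring the @mm contract documented above. The helper name is hypothetical; the reference and lock handling mirrors the mmget_not_zero()/mmap_read_lock() pattern that the removed xe_svm_alloc_vram() used:

	/* Hypothetical caller; not part of this patch. */
	static int populate_example(struct drm_pagemap *dpagemap,
				    unsigned long start, unsigned long end,
				    struct mm_struct *mm)
	{
		int err;

		if (!mmget_not_zero(mm))	/* hold a reference to the mm */
			return -EFAULT;

		mmap_read_lock(mm);		/* the mm is locked on entry */
		err = drm_pagemap_populate_mm(dpagemap, start, end, mm);
		mmap_read_unlock(mm);
		mmput(mm);

		return err;
	}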
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -3,12 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#include <drm/drm_drv.h>
+
#include "xe_bo.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
+#include "xe_pm.h"
#include "xe_pt.h"
#include "xe_svm.h"
+#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
@@ -535,8 +539,10 @@ static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
struct xe_bo *bo = to_xe_bo(devmem_allocation);
+ struct xe_device *xe = xe_bo_device(bo);

xe_bo_put_async(bo);
+ xe_pm_runtime_put(xe);
}

static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
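The xe_pm_runtime_put() added above balances the xe_pm_runtime_get_noresume() taken in the populate path below, so the device holds a PM reference for as long as device pages are active. A condensed sketch of the assumed pairing:

	/* Sketch only: the two halves live in different callbacks. */
	static void devmem_pm_lifetime_example(struct xe_device *xe)
	{
		/* At migrate time, while the device is known to be awake. */
		xe_pm_runtime_get_noresume(xe);

		/* ... device pages are live ... */

		/* In the devmem release callback. */
		xe_pm_runtime_put(xe);
	}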
@@ -660,77 +666,66 @@ static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
return &tile->mem.vram;
}
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm)
{
- struct mm_struct *mm = vm->svm.gpusvm.mm;
+ struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap);
+ struct xe_device *xe = tile_to_xe(tile);
+ struct device *dev = xe->drm.dev;
struct xe_vram_region *vr = tile_to_vr(tile);
struct drm_buddy_block *block;
struct list_head *blocks;
struct xe_bo *bo;
- ktime_t end = 0;
- int err;
+ ktime_t time_end = 0;
+ int err, idx;
- if (!range->base.flags.migrate_devmem)
- return -EINVAL;
+ if (!drm_dev_enter(&xe->drm, &idx))
+ return -ENODEV;
- range_debug(range, "ALLOCATE VRAM");
+ xe_pm_runtime_get(xe);
- if (!mmget_not_zero(mm))
- return -EFAULT;
- mmap_read_lock(mm);
-
-retry:
- bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
- xe_svm_range_size(range),
- ttm_bo_type_device,
+ retry:
+ bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start,
+ ttm_bo_type_kernel,
XE_BO_FLAG_VRAM_IF_DGFX(tile) |
XE_BO_FLAG_CPU_ADDR_MIRROR);
if (IS_ERR(bo)) {
err = PTR_ERR(bo);
- if (xe_vm_validate_should_retry(NULL, err, &end))
+ if (xe_vm_validate_should_retry(NULL, err, &time_end))
goto retry;
- goto unlock;
+ goto out_pm_put;
}
- drm_pagemap_devmem_init(&bo->devmem_allocation,
- vm->xe->drm.dev, mm,
+ drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
&dpagemap_devmem_ops,
&tile->mem.vram.dpagemap,
- xe_svm_range_size(range));
+ end - start);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
block->private = vr;
xe_bo_get(bo);
- err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation,
- mm,
- xe_svm_range_start(range),
- xe_svm_range_end(range),
- xe_svm_devm_owner(vm->xe));
+
+ /* Ensure the device has a pm ref while there are device pages active. */
+ xe_pm_runtime_get_noresume(xe);
+ err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
+ start, end, xe_svm_devm_owner(xe));
if (err)
xe_svm_devmem_release(&bo->devmem_allocation);
xe_bo_unlock(bo);
xe_bo_put(bo);
-unlock:
- mmap_read_unlock(mm);
- mmput(mm);
+out_pm_put:
+ xe_pm_runtime_put(xe);
+ drm_dev_exit(idx);
return err;
}
-#else
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
-{
- return -EOPNOTSUPP;
-}
-#endif
+#endif
/**
* xe_svm_handle_pagefault() - SVM handle page fault
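A condensed sketch of the unplug and runtime-PM bracketing that xe_drm_pagemap_populate_mm() applies around its body; the helper name is made up, while the drm_dev_enter()/drm_dev_exit() and xe_pm_runtime_get()/xe_pm_runtime_put() calls are the real ones:

	static int guarded_op_example(struct xe_device *xe)
	{
		int err, idx;

		if (!drm_dev_enter(&xe->drm, &idx))
			return -ENODEV;	/* device has been unplugged */

		xe_pm_runtime_get(xe);

		err = 0;	/* ... allocate the bo and migrate here ... */

		xe_pm_runtime_put(xe);
		drm_dev_exit(idx);

		return err;
	}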
@@ -787,9 +782,15 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
/* XXX: Add migration policy, for now migrate range once */
if (!range->skip_migrate && range->base.flags.migrate_devmem &&
xe_svm_range_size(range) >= SZ_64K) {
+ struct drm_pagemap *dpagemap;
+
range->skip_migrate = true;
- err = xe_svm_alloc_vram(vm, tile, range, &ctx);
+ range_debug(range, "ALLOCATE VRAM");
+ dpagemap = xe_tile_local_pagemap(tile);
+ err = drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
+ xe_svm_range_end(range),
+ range->base.gpusvm->mm);
if (err) {
drm_dbg(&vm->xe->drm,
"VRAM allocation failed, falling back to "
@@ -911,6 +912,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
.device_map = xe_drm_pagemap_device_map,
+ .populate_mm = xe_drm_pagemap_populate_mm,
};
/**
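For reference, a simplified sketch of how drm_pagemap_populate_mm() is assumed to dispatch the new hook:

	static int dispatch_example(struct drm_pagemap *dpagemap,
				    unsigned long start, unsigned long end,
				    struct mm_struct *mm)
	{
		if (!dpagemap->ops->populate_mm)
			return -EOPNOTSUPP;

		return dpagemap->ops->populate_mm(dpagemap, start, end, mm);
	}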
--- a/drivers/gpu/drm/xe/xe_tile.h
+++ b/drivers/gpu/drm/xe/xe_tile.h
@@ -16,4 +16,15 @@ int xe_tile_init(struct xe_tile *tile);
void xe_tile_migrate_wait(struct xe_tile *tile);
+#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return &tile->mem.vram.dpagemap;
+}
+#else
+static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile)
+{
+ return NULL;
+}
+#endif
#endif
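With the stub returning NULL when CONFIG_DRM_XE_PAGEMAP is disabled, callers can branch on the returned pointer instead of the config option. A hypothetical user:

	static inline bool tile_has_local_pagemap(struct xe_tile *tile)
	{
		return xe_tile_local_pagemap(tile) != NULL;
	}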