@@ -811,8 +811,8 @@ drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
INIT_LIST_HEAD(&range->entry);
- range->notifier_seq = LONG_MAX;
- range->flags.migrate_devmem = migrate_devmem ? 1 : 0;
+ range->pages.notifier_seq = LONG_MAX;
+ range->pages.flags.migrate_devmem = migrate_devmem ? 1 : 0;
return range;
}
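With this hunk, creation-time state lives behind range->pages. If more such fields accumulate, a pages-level init helper would keep allocation sites from reaching through the range at all; a minimal sketch, assuming the struct drm_gpusvm_pages introduced later in this patch (the helper itself is not part of the series):

	/*
	 * Hypothetical helper, not in this patch: one place to set the
	 * creation-time state of a struct drm_gpusvm_pages.
	 */
	static void drm_gpusvm_pages_init(struct drm_gpusvm_pages *svm_pages,
					  bool migrate_devmem)
	{
		svm_pages->notifier_seq = LONG_MAX;
		svm_pages->flags.migrate_devmem = migrate_devmem ? 1 : 0;
	}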
@@ -1140,15 +1140,16 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range,
unsigned long npages)
{
- unsigned long i, j;
- struct drm_pagemap *dpagemap = range->dpagemap;
+ struct drm_gpusvm_pages *svm_pages = &range->pages;
+ struct drm_pagemap *dpagemap = svm_pages->dpagemap;
struct device *dev = gpusvm->drm->dev;
+ unsigned long i, j;
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->flags.has_dma_mapping) {
+ if (svm_pages->flags.has_dma_mapping) {
for (i = 0, j = 0; i < npages; j++) {
- struct drm_pagemap_device_addr *addr = &range->dma_addr[j];
+ struct drm_pagemap_device_addr *addr = &svm_pages->dma_addr[j];
if (addr->proto == DRM_INTERCONNECT_SYSTEM)
dma_unmap_page(dev,
@@ -1160,9 +1161,9 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
dev, *addr);
i += 1 << addr->order;
}
- range->flags.has_devmem_pages = false;
- range->flags.has_dma_mapping = false;
- range->dpagemap = NULL;
+ svm_pages->flags.has_devmem_pages = false;
+ svm_pages->flags.has_dma_mapping = false;
+ svm_pages->dpagemap = NULL;
}
}
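After this change, __drm_gpusvm_range_unmap_pages() touches nothing of the range beyond &range->pages, so the only remaining coupling is the signature. A sketch of the pages-level variant the commit message gestures at (hypothetical; the name and argument list are assumptions, the body mirrors the function above):

	/*
	 * Sketch only: unmap DMA/devmem mappings given just the pages
	 * struct, so a future userptr path could share it. Caller holds
	 * the notifier lock, as in the range-based version above.
	 */
	static void drm_gpusvm_pages_unmap(struct drm_gpusvm *gpusvm,
					   struct drm_gpusvm_pages *svm_pages,
					   unsigned long npages)
	{
		struct drm_pagemap *dpagemap = svm_pages->dpagemap;
		struct device *dev = gpusvm->drm->dev;
		unsigned long i, j;

		lockdep_assert_held(&gpusvm->notifier_lock);

		if (!svm_pages->flags.has_dma_mapping)
			return;

		for (i = 0, j = 0; i < npages; j++) {
			struct drm_pagemap_device_addr *addr = &svm_pages->dma_addr[j];

			if (addr->proto == DRM_INTERCONNECT_SYSTEM)
				dma_unmap_page(dev, addr->addr,
					       PAGE_SIZE << addr->order,
					       addr->dir);
			else if (dpagemap && dpagemap->ops->device_unmap)
				dpagemap->ops->device_unmap(dpagemap, dev, *addr);
			/* Each entry may cover a higher-order chunk of pages. */
			i += 1 << addr->order;
		}

		svm_pages->flags.has_devmem_pages = false;
		svm_pages->flags.has_dma_mapping = false;
		svm_pages->dpagemap = NULL;
	}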
@@ -1176,11 +1177,13 @@ static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range)
{
+ struct drm_gpusvm_pages *svm_pages = &range->pages;
+
lockdep_assert_held(&gpusvm->notifier_lock);
- if (range->dma_addr) {
- kvfree(range->dma_addr);
- range->dma_addr = NULL;
+ if (svm_pages->dma_addr) {
+ kvfree(svm_pages->dma_addr);
+ svm_pages->dma_addr = NULL;
}
}
@@ -1291,9 +1294,11 @@ EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);
bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range)
{
+ struct drm_gpusvm_pages *svm_pages = &range->pages;
+
lockdep_assert_held(&gpusvm->notifier_lock);
- return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
+ return svm_pages->flags.has_devmem_pages || svm_pages->flags.has_dma_mapping;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);
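The same split suggests itself for the validity check: the exported range helper could become a thin shim over a predicate that only sees the pages struct. A hedged sketch, again not part of this patch (the body is taken verbatim from the hunk above):

	/* Sketch: pages-level validity test, reusable outside ranges. */
	static bool drm_gpusvm_pages_valid(struct drm_gpusvm *gpusvm,
					   struct drm_gpusvm_pages *svm_pages)
	{
		lockdep_assert_held(&gpusvm->notifier_lock);

		return svm_pages->flags.has_devmem_pages ||
		       svm_pages->flags.has_dma_mapping;
	}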
@@ -1311,9 +1316,10 @@ static bool
drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range)
{
+ struct drm_gpusvm_pages *svm_pages = &range->pages;
bool pages_valid;
- if (!range->dma_addr)
+ if (!svm_pages->dma_addr)
return false;
drm_gpusvm_notifier_lock(gpusvm);
@@ -1340,6 +1346,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range,
const struct drm_gpusvm_ctx *ctx)
{
+ struct drm_gpusvm_pages *svm_pages = &range->pages;
struct mmu_interval_notifier *notifier = &range->notifier->notifier;
struct hmm_range hmm_range = {
.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
@@ -1409,7 +1416,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
*/
drm_gpusvm_notifier_lock(gpusvm);
- if (range->flags.unmapped) {
+ if (svm_pages->flags.unmapped) {
drm_gpusvm_notifier_unlock(gpusvm);
err = -EFAULT;
goto err_free;
@@ -1421,13 +1428,12 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
goto retry;
}
- if (!range->dma_addr) {
+ if (!svm_pages->dma_addr) {
/* Unlock and restart mapping to allocate memory. */
drm_gpusvm_notifier_unlock(gpusvm);
- range->dma_addr = kvmalloc_array(npages,
- sizeof(*range->dma_addr),
- GFP_KERNEL);
- if (!range->dma_addr) {
+ svm_pages->dma_addr =
+ kvmalloc_array(npages, sizeof(*svm_pages->dma_addr), GFP_KERNEL);
+ if (!svm_pages->dma_addr) {
err = -ENOMEM;
goto err_free;
}
@@ -1465,13 +1471,13 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
goto err_unmap;
}
}
- range->dma_addr[j] =
+ svm_pages->dma_addr[j] =
dpagemap->ops->device_map(dpagemap,
gpusvm->drm->dev,
page, order,
dma_dir);
if (dma_mapping_error(gpusvm->drm->dev,
- range->dma_addr[j].addr)) {
+ svm_pages->dma_addr[j].addr)) {
err = -EFAULT;
goto err_unmap;
}
@@ -1494,7 +1500,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
goto err_unmap;
}
- range->dma_addr[j] = drm_pagemap_device_addr_encode
+ svm_pages->dma_addr[j] = drm_pagemap_device_addr_encode
(addr, DRM_INTERCONNECT_SYSTEM, order,
dma_dir);
}
@@ -1502,16 +1508,16 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
num_dma_mapped = i;
}
- range->flags.has_dma_mapping = true;
+ svm_pages->flags.has_dma_mapping = true;
if (zdd) {
- range->flags.has_devmem_pages = true;
- range->dpagemap = dpagemap;
+ svm_pages->flags.has_devmem_pages = true;
+ svm_pages->dpagemap = dpagemap;
}
drm_gpusvm_notifier_unlock(gpusvm);
kvfree(pfns);
set_seqno:
- range->notifier_seq = hmm_range.notifier_seq;
+ svm_pages->notifier_seq = hmm_range.notifier_seq;
return 0;
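Moving notifier_seq into the pages struct keeps the whole seqno protocol in one place: get_pages() samples the mmu interval notifier, faults and maps the pages, re-checks the sequence number under the notifier lock, and only publishes the mapping if no invalidation raced with it. Condensed from the function above, using the names this patch introduces:

	hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
	/* ... hmm_range_fault() and dma mapping happen here ... */
	drm_gpusvm_notifier_lock(gpusvm);
	if (mmu_interval_read_retry(notifier, hmm_range.notifier_seq)) {
		drm_gpusvm_notifier_unlock(gpusvm);
		goto retry;	/* an invalidation raced with us */
	}
	svm_pages->flags.has_dma_mapping = true;	/* publish */
	drm_gpusvm_notifier_unlock(gpusvm);
	svm_pages->notifier_seq = hmm_range.notifier_seq;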
@@ -1718,7 +1724,7 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
mmap_assert_locked(gpusvm->mm);
- if (!range->flags.migrate_devmem)
+ if (!range->pages.flags.migrate_devmem)
return -EINVAL;
if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
@@ -2247,10 +2253,10 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
{
lockdep_assert_held_write(&range->gpusvm->notifier_lock);
- range->flags.unmapped = true;
+ range->pages.flags.unmapped = true;
if (drm_gpusvm_range_start(range) < mmu_range->start ||
drm_gpusvm_range_end(range) > mmu_range->end)
- range->flags.partial_unmap = true;
+ range->pages.flags.partial_unmap = true;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);
@@ -701,7 +701,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
return -EAGAIN;
}
if (xe_svm_range_has_dma_mapping(range)) {
- xe_res_first_dma(range->base.dma_addr, 0,
+ xe_res_first_dma(range->base.pages.dma_addr, 0,
range->base.itree.last + 1 - range->base.itree.start,
&curs);
xe_svm_range_debug(range, "BIND PREPARE - MIXED");
@@ -17,7 +17,7 @@
static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
/* Not reliable without notifier lock */
- return range->base.flags.has_devmem_pages;
+ return range->base.pages.flags.has_devmem_pages;
}
static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
@@ -59,7 +59,7 @@ static unsigned long xe_svm_range_size(struct xe_svm_range *range)
(r__)->base.gpusvm, \
xe_svm_range_in_vram((r__)) ? 1 : 0, \
xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
- (r__)->base.notifier_seq, \
+ (r__)->base.pages.notifier_seq, \
xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
xe_svm_range_size((r__)))
@@ -135,7 +135,7 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
range_debug(range, "NOTIFIER");
/* Skip if already unmapped or if no bindings exist */
- if (range->base.flags.unmapped || !range->tile_present)
+ if (range->base.pages.flags.unmapped || !range->tile_present)
return 0;
range_debug(range, "NOTIFIER - EXECUTE");
@@ -783,7 +783,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
range_debug(range, "PAGE FAULT");
/* XXX: Add migration policy, for now migrate range once */
- if (!range->skip_migrate && range->base.flags.migrate_devmem &&
+ if (!range->skip_migrate && range->base.pages.flags.migrate_devmem &&
xe_svm_range_size(range) >= SZ_64K) {
range->skip_migrate = true;
@@ -84,7 +84,7 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
{
lockdep_assert_held(&range->base.gpusvm->notifier_lock);
- return range->base.flags.has_dma_mapping;
+ return range->base.pages.flags.has_dma_mapping;
}
#define xe_svm_assert_in_notifier(vm__) \
@@ -112,7 +112,9 @@ struct xe_vram_region;
struct xe_svm_range {
struct {
struct interval_tree_node itree;
- const struct drm_pagemap_device_addr *dma_addr;
+ struct {
+ struct drm_pagemap_device_addr *dma_addr;
+ } pages;
} base;
u32 tile_present;
u32 tile_invalidated;
@@ -185,6 +185,35 @@ struct drm_gpusvm_notifier {
} flags;
};
+/**
+ * struct drm_gpusvm_pages - Structure representing GPU SVM mapped pages
+ *
+ * @dma_addr: Device address array
+ * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
+ * Note this assumes only one drm_pagemap per range is allowed.
+ * @notifier_seq: Notifier sequence number of the pages
+ * @flags: Flags for the mapped pages
+ * @flags.migrate_devmem: Flag indicating whether the pages can be migrated to device memory
+ * @flags.unmapped: Flag indicating if the pages have been unmapped
+ * @flags.partial_unmap: Flag indicating if the pages have been partially unmapped
+ * @flags.has_devmem_pages: Flag indicating if the pages include devmem pages
+ * @flags.has_dma_mapping: Flag indicating if the pages have a DMA mapping
+ */
+struct drm_gpusvm_pages {
+ struct drm_pagemap_device_addr *dma_addr;
+ struct drm_pagemap *dpagemap;
+ unsigned long notifier_seq;
+ struct {
+ /* All flags below must be set upon creation */
+ u16 migrate_devmem : 1;
+ /* All flags below must be set / cleared under notifier lock */
+ u16 unmapped : 1;
+ u16 partial_unmap : 1;
+ u16 has_devmem_pages : 1;
+ u16 has_dma_mapping : 1;
+ } flags;
+};
+
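Nothing in the new struct refers back to a range or an interval tree, which is what makes the re-use the commit message mentions possible: other owners can embed it wholesale. A hypothetical illustration of the userptr case (names are illustrative, not from this series):

	/*
	 * Illustrative only: a userptr object owning pages state directly,
	 * with no drm_gpusvm_range or interval tree around it.
	 */
	struct example_userptr {
		unsigned long start, end;	/* mirrored CPU VA */
		struct mmu_interval_notifier notifier;
		struct drm_gpusvm_pages pages;	/* shared get/unmap/free state */
	};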
/**
* struct drm_gpusvm_range - Structure representing a GPU SVM range
*
@@ -193,16 +222,7 @@ struct drm_gpusvm_notifier {
* @refcount: Reference count for the range
* @itree: Interval tree node for the range (inserted in GPU SVM notifier)
* @entry: List entry to fast interval tree traversal
- * @notifier_seq: Notifier sequence number of the range's pages
- * @dma_addr: Device address array
- * @dpagemap: The struct drm_pagemap of the device pages we're dma-mapping.
- * Note this is assuming only one drm_pagemap per range is allowed.
- * @flags: Flags for range
- * @flags.migrate_devmem: Flag indicating whether the range can be migrated to device memory
- * @flags.unmapped: Flag indicating if the range has been unmapped
- * @flags.partial_unmap: Flag indicating if the range has been partially unmapped
- * @flags.has_devmem_pages: Flag indicating if the range has devmem pages
- * @flags.has_dma_mapping: Flag indicating if the range has a DMA mapping
+ * @pages: The pages for this range.
*
* This structure represents a GPU SVM range used for tracking memory ranges
* mapped in a DRM device.
@@ -213,18 +233,7 @@ struct drm_gpusvm_range {
struct kref refcount;
struct interval_tree_node itree;
struct list_head entry;
- unsigned long notifier_seq;
- struct drm_pagemap_device_addr *dma_addr;
- struct drm_pagemap *dpagemap;
- struct {
- /* All flags below must be set upon creation */
- u16 migrate_devmem : 1;
- /* All flags below must be set / cleared under notifier lock */
- u16 unmapped : 1;
- u16 partial_unmap : 1;
- u16 has_devmem_pages : 1;
- u16 has_dma_mapping : 1;
- } flags;
+ struct drm_gpusvm_pages pages;
};
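For driver code tracking this change, the conversion is mechanical, as the xe hunks above show: every pages-related field keeps its name, type and locking rules and simply gains a .pages hop. For example:

	seq = range->notifier_seq;			/* before */
	seq = range->pages.notifier_seq;		/* after */

	mapped = range->flags.has_dma_mapping;		/* before */
	mapped = range->pages.flags.has_dma_mapping;	/* after */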
/**
Pull the pages stuff from the svm range into its own substructure, with the idea of having the main pages related routines, like get_pages(), unmap_pages() and free_pages() all operating on some lower level structures, which can then be re-used for stuff like userptr. v2: - Move seq into pages struct (Matt B) Suggested-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> --- drivers/gpu/drm/drm_gpusvm.c | 66 ++++++++++++++++++++---------------- drivers/gpu/drm/xe/xe_pt.c | 2 +- drivers/gpu/drm/xe/xe_svm.c | 8 ++--- drivers/gpu/drm/xe/xe_svm.h | 6 ++-- include/drm/drm_gpusvm.h | 53 +++++++++++++++++------------ 5 files changed, 76 insertions(+), 59 deletions(-)