@@ -141,9 +141,18 @@ config DRM_I915_SVM
bool "Enable Shared Virtual Memory support in i915"
depends on STAGING
depends on DRM_I915
+ depends on ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on ARCH_ENABLE_MEMORY_HOTREMOVE
+ depends on MEMORY_HOTPLUG
+ depends on MEMORY_HOTREMOVE
+ depends on ARCH_HAS_PTE_DEVMAP
+ depends on SPARSEMEM_VMEMMAP
+ depends on ZONE_DEVICE
+ depends on DEVICE_PRIVATE
depends on MMU
select HMM_MIRROR
select MMU_NOTIFIER
+ select MIGRATE_VMA_HELPER
default n
help
Choose this option if you want Shared Virtual Memory (SVM)
@@ -155,7 +155,8 @@ i915-y += \
# SVM code
i915-$(CONFIG_DRM_I915_SVM) += gem/i915_gem_svm.o \
- i915_svm.o
+ i915_svm.o \
+ i915_svm_devmem.o
# general-purpose microcontroller (GuC) support
obj-y += gt/uc/
@@ -504,19 +504,6 @@ int __init i915_global_objects_init(void)
return 0;
}
-static enum intel_region_id
-__region_id(u32 region)
-{
- enum intel_region_id id;
-
- for (id = 0; id < INTEL_REGION_UNKNOWN; ++id) {
- if (intel_region_map[id] == region)
- return id;
- }
-
- return INTEL_REGION_UNKNOWN;
-}
-
bool
i915_gem_object_svm_mapped(struct drm_i915_gem_object *obj)
{
@@ -9,6 +9,9 @@
#include <linux/bitops.h>
#include <linux/list.h>
+/* 512 bits (one per page) supports 2MB blocks */
+#define I915_BUDDY_MAX_PAGES 512
+
struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
@@ -32,6 +35,15 @@ struct i915_buddy_block {
*/
struct list_head link;
struct list_head tmp_link;
+
+ unsigned long pfn_first;
+ /*
+ * FIXME: There are other alternatives to bitmap. Like splitting the
+ * block into contiguous 4K sized blocks. But it is part of bigger
+ * issues involving partially invalidating large mapping, freeing the
+ * blocks etc., revisit.
+ */
+ unsigned long bitmap[BITS_TO_LONGS(I915_BUDDY_MAX_PAGES)];
};
#define I915_BUDDY_MAX_ORDER I915_BUDDY_HEADER_ORDER
@@ -2765,6 +2765,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_VM_BIND, i915_gem_vm_bind_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_PREFETCH, i915_gem_vm_prefetch_ioctl, DRM_RENDER_ALLOW)
};
static struct drm_driver driver = {
@@ -152,6 +152,7 @@ static int i915_range_fault(struct svm_notifier *sn,
struct mm_struct *mm = sn->notifier.mm;
struct i915_address_space *vm = svm->vm;
u32 sg_page_sizes;
+ int regions;
u64 flags;
long ret;
@@ -169,6 +170,11 @@ static int i915_range_fault(struct svm_notifier *sn,
return ret;
}
+ /* For dgfx, ensure the range is in device local memory only */
+ regions = i915_dmem_convert_pfn(vm->i915, &range);
+ if (!regions || (IS_DGFX(vm->i915) && (regions & REGION_SMEM)))
+ return -EINVAL;
+
sg_page_sizes = i915_svm_build_sg(vm, &range, st);
mutex_lock(&svm->mutex);
@@ -33,6 +33,14 @@ static inline bool i915_vm_is_svm_enabled(struct i915_address_space *vm)
return vm->svm;
}
+int i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
+ struct hmm_range *range);
+int i915_gem_vm_prefetch_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915,
+ u64 size);
+void i915_svm_devmem_remove(struct i915_devmem *devmem);
+
#else
struct i915_svm { };
@@ -45,6 +53,13 @@ static inline int i915_svm_bind_mm(struct i915_address_space *vm)
static inline bool i915_vm_is_svm_enabled(struct i915_address_space *vm)
{ return false; }
+static inline int i915_gem_vm_prefetch_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv)
+{
+	/*
+	 * ENOTSUPP is kernel-internal (NFSv3 specific) and must not be
+	 * returned to userspace; EOPNOTSUPP is the uapi-visible errno.
+	 */
+	return -EOPNOTSUPP;
+}
+static inline
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915, u64 size)
+{ return NULL; }
+static inline void i915_svm_devmem_remove(struct i915_devmem *devmem) { }
#endif
#endif /* __I915_SVM_H */
new file mode 100644
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/mm_types.h>
+#include <linux/sched/mm.h>
+
+#include "i915_svm.h"
+#include "intel_memory_region.h"
+
+/* Per-call state for one SVM range migration between host and device */
+struct i915_devmem_migrate {
+	struct drm_i915_private *i915;
+	struct migrate_vma *args;	/* chunk currently being migrated */
+
+	enum intel_region_id src_id;	/* region pages migrate from */
+	enum intel_region_id dst_id;	/* region pages migrate to */
+	u64 npages;			/* pages in the current chunk */
+};
+
+/* Device memory exposed to HMM as a DEVICE_PRIVATE dev_pagemap */
+struct i915_devmem {
+	struct drm_i915_private *i915;
+	struct dev_pagemap pagemap;
+	unsigned long pfn_first;	/* first pfn of the remapped range */
+	unsigned long pfn_last;		/* last pfn of the remapped range */
+};
+
+/* True if @page is a device-private page, i.e. one of our LMEM pages */
+static inline bool
+i915_dmem_page(struct drm_i915_private *dev_priv, struct page *page)
+{
+	return is_device_private_page(page);
+}
+
+/*
+ * i915_dmem_convert_pfn - rewrite HMM device-private pfns as LMEM addresses
+ * @dev_priv: i915 device
+ * @range: HMM range whose pfns[] were filled by hmm_range_fault()
+ *
+ * Walks every page in @range. Entries backed by our device-private pages
+ * are rewritten in place so the pfn field encodes the page's physical
+ * address within the LMEM region (derived from the owning buddy block);
+ * unknown device pages are cleared. Returns a REGION_SMEM/REGION_LMEM
+ * mask telling the caller which memory types the range currently spans.
+ */
+int i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
+			  struct hmm_range *range)
+{
+	unsigned long i, npages;
+	int regions = 0;
+
+	npages = (range->end - range->start) >> PAGE_SHIFT;
+	for (i = 0; i < npages; ++i) {
+		struct i915_buddy_block *block;
+		struct intel_memory_region *mem;
+		struct page *page;
+		u64 addr;
+
+		page = hmm_device_entry_to_page(range, range->pfns[i]);
+		if (!page)
+			continue;
+
+		/* Regular system memory page: just record the region type */
+		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+			regions |= REGION_SMEM;
+			continue;
+		}
+
+		/* Device-private but not one of ours: drop the entry */
+		if (!i915_dmem_page(dev_priv, page)) {
+			WARN(1, "Some unknown device memory !\n");
+			range->pfns[i] = 0;
+			continue;
+		}
+
+		regions |= REGION_LMEM;
+		/* Page offset inside the buddy block gives the LMEM address */
+		block = page->zone_device_data;
+		mem = block->private;
+		addr = mem->region.start +
+		       i915_buddy_block_offset(block);
+		addr += (page_to_pfn(page) - block->pfn_first) << PAGE_SHIFT;
+
+		/* Replace the CPU pfn with the device address, keep flags */
+		range->pfns[i] &= ~range->flags[HMM_PFN_DEVICE_PRIVATE];
+		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
+		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+	}
+
+	return regions;
+}
+
+/*
+ * i915_devmem_page_alloc_locked - reserve buddy blocks backing @npages pages
+ * @mem: LMEM region to allocate from
+ * @npages: number of 4K pages needed
+ * @blocks: list head filled with the allocated buddy blocks
+ *
+ * Each block's page bitmap is cleared so i915_devmem_page_get_locked()
+ * can hand out individual pages. Returns 0 on success, negative errno
+ * otherwise.
+ */
+static int
+i915_devmem_page_alloc_locked(struct intel_memory_region *mem,
+			      unsigned long npages,
+			      struct list_head *blocks)
+{
+	unsigned long size = ALIGN((npages * PAGE_SIZE), mem->mm.chunk_size);
+	struct i915_buddy_block *block;
+	int ret;
+
+	INIT_LIST_HEAD(blocks);
+	ret = __intel_memory_region_get_pages_buddy(mem, size, 0, blocks);
+	if (unlikely(ret))
+		goto alloc_failed;
+
+	list_for_each_entry(block, blocks, link) {
+		/* pfn of the block's first page within the dev_pagemap */
+		block->pfn_first = mem->devmem->pfn_first;
+		block->pfn_first += i915_buddy_block_offset(block) /
+				    PAGE_SIZE;
+		bitmap_zero(block->bitmap, I915_BUDDY_MAX_PAGES);
+		DRM_DEBUG_DRIVER("%s pfn_first 0x%lx off 0x%llx size 0x%llx\n",
+				 "Allocated block", block->pfn_first,
+				 i915_buddy_block_offset(block),
+				 i915_buddy_block_size(&mem->mm, block));
+	}
+
+alloc_failed:
+	return ret;
+}
+
+/*
+ * i915_devmem_page_get_locked - take one free page from the block list
+ * @mem: LMEM region the blocks belong to
+ * @blocks: list filled by i915_devmem_page_alloc_locked()
+ *
+ * Marks the page used in its block's bitmap and drops a fully-used block
+ * from @blocks. The returned page holds a reference and is locked, as
+ * migrate_vma_pages() expects destination pages to be. Returns NULL only
+ * on bitmap bookkeeping errors (WARNed), since the caller allocated
+ * exactly as many pages as it will request.
+ */
+static struct page *
+i915_devmem_page_get_locked(struct intel_memory_region *mem,
+			    struct list_head *blocks)
+{
+	struct i915_buddy_block *block, *on;
+
+	list_for_each_entry_safe(block, on, blocks, link) {
+		unsigned long weight, max;
+		unsigned long i, pfn;
+		struct page *page;
+
+		max = i915_buddy_block_size(&mem->mm, block) / PAGE_SIZE;
+		i = find_first_zero_bit(block->bitmap, max);
+		if (unlikely(i == max)) {
+			WARN(1, "Getting a page should have never failed\n");
+			break;
+		}
+
+		set_bit(i, block->bitmap);
+		pfn = block->pfn_first + i;
+		page = pfn_to_page(pfn);
+		get_page(page);
+		lock_page(page);
+		/* Back-pointer used by page_free/convert_pfn callbacks */
+		page->zone_device_data = block;
+		weight = bitmap_weight(block->bitmap, max);
+		/* Block exhausted: stop offering it for further gets */
+		if (weight == max)
+			list_del_init(&block->link);
+		DRM_DEBUG_DRIVER("%s pfn 0x%lx block weight 0x%lx\n",
+				 "Allocated page", pfn, weight);
+		return page;
+	}
+	return NULL;
+}
+
+/*
+ * Undo i915_devmem_page_get_locked(): drop the page lock and reference.
+ * Used on error unwind before the page was handed to migrate_vma_pages().
+ */
+static void
+i915_devmem_page_free_locked(struct drm_i915_private *dev_priv,
+			     struct page *page)
+{
+	unlock_page(page);
+	put_page(page);
+}
+
+/*
+ * i915_devmem_migrate_alloc_and_copy - allocate LMEM destination pages
+ * @migrate: migration state for the current chunk
+ *
+ * For every source page that migrate_vma_setup() flagged as migratable,
+ * allocate a locked device page and record its migrate pfn in dst[].
+ * On failure every already-taken destination page is released and dst[]
+ * is cleared so migrate_vma_pages() migrates nothing. Returns 0 on
+ * success, negative errno otherwise.
+ * XXX: the actual data copy is not implemented yet.
+ */
+static int
+i915_devmem_migrate_alloc_and_copy(struct i915_devmem_migrate *migrate)
+{
+	struct drm_i915_private *i915 = migrate->i915;
+	struct migrate_vma *args = migrate->args;
+	struct intel_memory_region *mem;
+	struct list_head blocks = {0};
+	unsigned long i, npages, cnt;
+	struct page *page;
+	int ret;
+
+	npages = (args->end - args->start) >> PAGE_SHIFT;
+	DRM_DEBUG_DRIVER("start 0x%lx npages %ld\n", args->start, npages);
+
+	/* Check source pages */
+	for (i = 0, cnt = 0; i < npages; i++) {
+		args->dst[i] = 0;
+		page = migrate_pfn_to_page(args->src[i]);
+		if (unlikely(!page || !(args->src[i] & MIGRATE_PFN_MIGRATE)))
+			continue;
+
+		/* Temporary marker; replaced by the real dst pfn below */
+		args->dst[i] = MIGRATE_PFN_VALID;
+		cnt++;
+	}
+
+	if (!cnt) {
+		ret = -ENOMEM;
+		goto migrate_out;
+	}
+
+	mem = i915->mm.regions[migrate->dst_id];
+	ret = i915_devmem_page_alloc_locked(mem, cnt, &blocks);
+	if (unlikely(ret))
+		goto migrate_out;
+
+	/* Allocate device memory */
+	for (i = 0, cnt = 0; i < npages; i++) {
+		if (!args->dst[i])
+			continue;
+
+		page = i915_devmem_page_get_locked(mem, &blocks);
+		if (unlikely(!page)) {
+			WARN(1, "Failed to get dst page\n");
+			args->dst[i] = 0;
+			continue;
+		}
+
+		cnt++;
+		args->dst[i] = migrate_pfn(page_to_pfn(page)) |
+			       MIGRATE_PFN_LOCKED;
+	}
+
+	if (!cnt) {
+		ret = -ENOMEM;
+		goto migrate_out;
+	}
+
+	/* Copy the pages */
+	migrate->npages = npages;
+	/* Success falls through with ret == 0 from the block allocation */
+migrate_out:
+	if (unlikely(ret)) {
+		for (i = 0; i < npages; i++) {
+			if (args->dst[i] & MIGRATE_PFN_LOCKED) {
+				page = migrate_pfn_to_page(args->dst[i]);
+				i915_devmem_page_free_locked(i915, page);
+			}
+			args->dst[i] = 0;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Post-migration fixup hook; currently only logs the chunk size.
+ * XXX: insert the GPU mapping for the migrated pages here.
+ */
+void i915_devmem_migrate_finalize_and_map(struct i915_devmem_migrate *migrate)
+{
+	/* npages is u64: use the unsigned format specifier */
+	DRM_DEBUG_DRIVER("npages %llu\n", migrate->npages);
+}
+
+/*
+ * Migrate one chunk: populate destination pages, move them, then let
+ * migrate_vma_finalize() clean up whatever did or did not migrate.
+ */
+static void i915_devmem_migrate_chunk(struct i915_devmem_migrate *migrate)
+{
+	struct migrate_vma *args = migrate->args;
+
+	if (!i915_devmem_migrate_alloc_and_copy(migrate)) {
+		migrate_vma_pages(args);
+		i915_devmem_migrate_finalize_and_map(migrate);
+	}
+	migrate_vma_finalize(args);
+}
+
+/*
+ * i915_devmem_migrate_vma - migrate [start, end) of @vma into device memory
+ * @mem: destination LMEM region
+ * @vma: source vma (system memory)
+ * @start: first address to migrate (page aligned)
+ * @end: end of the range (exclusive, page aligned)
+ *
+ * The range is processed in chunks of at most I915_BUDDY_MAX_PAGES pages,
+ * matching the src/dst pfn array size. Returns 0 on success or the first
+ * migrate_vma_setup() error.
+ */
+int i915_devmem_migrate_vma(struct intel_memory_region *mem,
+			    struct vm_area_struct *vma,
+			    unsigned long start,
+			    unsigned long end)
+{
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	unsigned long max = min_t(unsigned long, I915_BUDDY_MAX_PAGES, npages);
+	struct i915_devmem_migrate migrate = {0};
+	struct migrate_vma args = {
+		.vma = vma,
+		.start = start,
+	};
+	unsigned long c, i;
+	int ret = 0;
+
+	/* XXX: Opportunistically migrate additional pages? */
+	DRM_DEBUG_DRIVER("start 0x%lx end 0x%lx\n", start, end);
+	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
+	if (unlikely(!args.src))
+		return -ENOMEM;
+
+	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
+	if (unlikely(!args.dst)) {
+		kfree(args.src);
+		return -ENOMEM;
+	}
+
+	/* XXX: Support migrating from LMEM to SMEM */
+	migrate.args = &args;
+	migrate.i915 = mem->i915;
+	migrate.src_id = INTEL_REGION_SMEM;
+	migrate.dst_id = MEMORY_TYPE_FROM_REGION(mem->id);
+	for (i = 0; i < npages; i += c) {
+		/*
+		 * Clamp the chunk to the pages actually remaining; using
+		 * 'npages' here would overrun both the requested range and
+		 * the src/dst arrays on every chunk after the first.
+		 */
+		c = min_t(unsigned long, I915_BUDDY_MAX_PAGES, npages - i);
+		/* Advance from the previous chunk's end, not from 'start' */
+		args.end = args.start + (c << PAGE_SHIFT);
+		ret = migrate_vma_setup(&args);
+		if (unlikely(ret))
+			goto migrate_done;
+		if (args.cpages)
+			i915_devmem_migrate_chunk(&migrate);
+		args.start = args.end;
+	}
+migrate_done:
+	kfree(args.dst);
+	kfree(args.src);
+	return ret;
+}
+
+/*
+ * CPU fault on one of our device-private pages. Migration back to system
+ * memory is not implemented yet, so the faulting process gets SIGBUS.
+ * XXX: implement LMEM -> SMEM migration here.
+ */
+static vm_fault_t i915_devmem_migrate_to_ram(struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+/*
+ * dev_pagemap.page_free callback: the last reference to a device page is
+ * gone. Clear its bit in the owning buddy block's bitmap and release the
+ * block back to the region once every page in it has been freed.
+ */
+static void i915_devmem_page_free(struct page *page)
+{
+	struct i915_buddy_block *block = page->zone_device_data;
+	struct intel_memory_region *mem = block->private;
+	unsigned long i, max, weight;
+
+	max = i915_buddy_block_size(&mem->mm, block) / PAGE_SIZE;
+	i = page_to_pfn(page) - block->pfn_first;
+	clear_bit(i, block->bitmap);
+	weight = bitmap_weight(block->bitmap, max);
+	DRM_DEBUG_DRIVER("%s pfn 0x%lx block weight 0x%lx\n",
+			 "Freeing page", page_to_pfn(page), weight);
+	/* Last page in the block: return the block to the buddy allocator */
+	if (!weight) {
+		DRM_DEBUG_DRIVER("%s pfn_first 0x%lx off 0x%llx size 0x%llx\n",
+				 "Freeing block", block->pfn_first,
+				 i915_buddy_block_offset(block),
+				 i915_buddy_block_size(&mem->mm, block));
+		__intel_memory_region_put_block_buddy(block);
+	}
+}
+
+/* ZONE_DEVICE callbacks for our DEVICE_PRIVATE pages */
+static const struct dev_pagemap_ops i915_devmem_pagemap_ops = {
+	.page_free = i915_devmem_page_free,
+	.migrate_to_ram = i915_devmem_migrate_to_ram,
+};
+
+/*
+ * i915_svm_devmem_add - create a DEVICE_PRIVATE pagemap covering @size bytes
+ * @i915: i915 device
+ * @size: amount of device memory to expose, in bytes
+ *
+ * Reserves a free physical address range and remaps it as device-private
+ * memory so LMEM pages get struct pages usable with HMM migration.
+ *
+ * Returns a valid devmem pointer on success, an ERR_PTR() otherwise. The
+ * caller checks failure with IS_ERR(); returning NULL here (as previously)
+ * made IS_ERR() false and failures were silently treated as success.
+ */
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915, u64 size)
+{
+	struct device *dev = &i915->drm.pdev->dev;
+	struct i915_devmem *devmem;
+	struct resource *res;
+	void *addr;
+	int ret;
+
+	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+	if (!devmem)
+		return ERR_PTR(-ENOMEM);
+
+	devmem->i915 = i915;
+	res = devm_request_free_mem_region(dev, &iomem_resource, size);
+	if (IS_ERR(res)) {
+		ret = PTR_ERR(res);
+		goto out_free;
+	}
+
+	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	devmem->pagemap.res = *res;
+	devmem->pagemap.ops = &i915_devmem_pagemap_ops;
+	addr = devm_memremap_pages(dev, &devmem->pagemap);
+	if (IS_ERR(addr)) {
+		ret = PTR_ERR(addr);
+		goto out_free;
+	}
+
+	devmem->pfn_first = res->start >> PAGE_SHIFT;
+	devmem->pfn_last = res->end >> PAGE_SHIFT;
+	return devmem;
+out_free:
+	kfree(devmem);
+	return ERR_PTR(ret);
+}
+
+/*
+ * i915_svm_devmem_remove - tear down a pagemap created by i915_svm_devmem_add
+ * @devmem: devmem to release; NULL/ERR_PTR is tolerated
+ *
+ * Region teardown can run when devmem setup failed or never happened
+ * (e.g. CONFIG_DRM_I915_SVM disabled), so guard against a bogus pointer
+ * instead of dereferencing it.
+ */
+void i915_svm_devmem_remove(struct i915_devmem *devmem)
+{
+	if (IS_ERR_OR_NULL(devmem))
+		return;
+
+	/* XXX: Is it the right way to release? */
+	release_resource(&devmem->pagemap.res);
+	kfree(devmem);
+}
+
+/*
+ * i915_gem_vm_prefetch_ioctl - migrate a user VA range into device memory
+ * @dev: drm device
+ * @data: struct drm_i915_gem_vm_prefetch from userspace
+ * @file_priv: drm file
+ *
+ * Best-effort prefetch: each vma intersecting the range is migrated with
+ * i915_devmem_migrate_vma() and per-vma errors are ignored. Only LMEM
+ * destination regions are accepted. Returns 0, or -EINVAL for a bad
+ * request or when the caller has no mm.
+ */
+int i915_gem_vm_prefetch_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_vm_prefetch *args = data;
+	unsigned long addr, end, size = args->length;
+	struct intel_memory_region *mem;
+	enum intel_region_id id;
+	struct mm_struct *mm;
+
+	if (args->type != I915_GEM_VM_PREFETCH_SVM_BUFFER)
+		return -EINVAL;
+
+	DRM_DEBUG_DRIVER("start 0x%llx length 0x%llx region 0x%x\n",
+			 args->start, args->length, args->region);
+	id = __region_id(args->region);
+	if ((MEMORY_TYPE_FROM_REGION(args->region) != INTEL_MEMORY_LOCAL) ||
+	    id == INTEL_REGION_UNKNOWN)
+		return -EINVAL;
+
+	mem = i915->mm.regions[id];
+
+	/* get_task_mm() returns NULL for kernel threads or an exiting task */
+	mm = get_task_mm(current);
+	if (!mm)
+		return -EINVAL;
+	down_read(&mm->mmap_sem);
+
+	for (addr = args->start, end = args->start + size; addr < end;) {
+		struct vm_area_struct *vma;
+		unsigned long next;
+
+		vma = find_vma_intersection(mm, addr, end);
+		if (!vma)
+			break;
+
+		/*
+		 * The vma may begin above addr; clamp so the migrate range
+		 * lies entirely within the vma, as migrate_vma_setup()
+		 * requires.
+		 */
+		addr = max(addr & PAGE_MASK, vma->vm_start);
+		next = min(vma->vm_end, end);
+		next = round_up(next, PAGE_SIZE);
+		/* This is a best effort so we ignore errors */
+		i915_devmem_migrate_vma(mem, vma, addr, next);
+		addr = next;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return 0;
+}
@@ -56,6 +56,19 @@ enum intel_region_id {
*/
extern const u32 intel_region_map[];
+/*
+ * __region_id - translate a uapi memory region value to an intel_region_id
+ * @region: region value from userspace (an entry of intel_region_map[])
+ *
+ * Returns INTEL_REGION_UNKNOWN when @region names no known region.
+ */
+static inline enum intel_region_id
+__region_id(u32 region)
+{
+	enum intel_region_id id;
+
+	for (id = 0; id < INTEL_REGION_UNKNOWN; ++id) {
+		if (intel_region_map[id] == region)
+			return id;
+	}
+
+	return INTEL_REGION_UNKNOWN;
+}
+
struct intel_memory_region_ops {
unsigned int flags;
@@ -71,6 +84,7 @@ struct intel_memory_region_ops {
struct intel_memory_region {
struct drm_i915_private *i915;
+ struct i915_devmem *devmem;
const struct intel_memory_region_ops *ops;
struct io_mapping iomap;
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_svm.h"
#include "intel_memory_region.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -66,6 +67,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
static void
region_lmem_release(struct intel_memory_region *mem)
{
+ i915_svm_devmem_remove(mem->devmem);
release_fake_lmem_bar(mem);
io_mapping_fini(&mem->iomap);
intel_memory_region_release_buddy(mem);
@@ -122,6 +124,14 @@ intel_setup_fake_lmem(struct drm_i915_private *i915)
PAGE_SIZE,
io_start,
&intel_region_lmem_ops);
+ if (!IS_ERR(mem)) {
+ mem->devmem = i915_svm_devmem_add(i915, mappable_end);
+ if (IS_ERR(mem->devmem)) {
+ intel_memory_region_put(mem);
+ mem = ERR_CAST(mem->devmem);
+ }
+ }
+
if (!IS_ERR(mem)) {
DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
@@ -361,6 +361,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_VM_DESTROY 0x3b
#define DRM_I915_GEM_OBJECT_SETPARAM DRM_I915_GEM_CONTEXT_SETPARAM
#define DRM_I915_GEM_VM_BIND 0x3c
+#define DRM_I915_GEM_VM_PREFETCH 0x3d
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -426,6 +427,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
#define DRM_IOCTL_I915_GEM_OBJECT_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_OBJECT_SETPARAM, struct drm_i915_gem_object_param)
#define DRM_IOCTL_I915_GEM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_VM_PREFETCH DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_PREFETCH, struct drm_i915_gem_vm_prefetch)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -2334,6 +2336,26 @@ struct drm_i915_gem_vm_bind {
#define I915_GEM_VM_BIND_READONLY (1 << 1)
};
+/**
+ * struct drm_i915_gem_vm_prefetch
+ *
+ * Prefetch an address range to a memory region.
+ */
+struct drm_i915_gem_vm_prefetch {
+	/** @type: Type of memory to prefetch */
+	__u32 type;
+#define I915_GEM_VM_PREFETCH_SVM_BUFFER   0
+
+	/** @region: Memory region to prefetch to */
+	__u32 region;
+
+	/** @start: VA start to prefetch */
+	__u64 start;
+
+	/** @length: VA length to prefetch */
+	__u64 length;
+};
+
#if defined(__cplusplus)
}
#endif
Plugin device memory through HMM as DEVICE_PRIVATE. Add support functions to allocate pages and free pages from device memory. Implement ioctl to prefetch pages from host to device memory. For now, only support migrating pages from host memory to device memory. Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Jon Bloomfield <jon.bloomfield@intel.com> Cc: Daniel Vetter <daniel.vetter@intel.com> Cc: Sudeep Dutt <sudeep.dutt@intel.com> Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com> --- drivers/gpu/drm/i915/Kconfig | 9 + drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/gem/i915_gem_object.c | 13 - drivers/gpu/drm/i915/i915_buddy.h | 12 + drivers/gpu/drm/i915/i915_drv.c | 1 + drivers/gpu/drm/i915/i915_svm.c | 6 + drivers/gpu/drm/i915/i915_svm.h | 15 + drivers/gpu/drm/i915/i915_svm_devmem.c | 400 +++++++++++++++++++++ drivers/gpu/drm/i915/intel_memory_region.h | 14 + drivers/gpu/drm/i915/intel_region_lmem.c | 10 + include/uapi/drm/i915_drm.h | 22 ++ 11 files changed, 491 insertions(+), 14 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_svm_devmem.c