[4/4] drm/i915: Add cpu fault handler for mmap_offset

Message ID 20191119113710.1162-4-abdiel.janulgue@linux.intel.com (mailing list archive)
State New, archived
Series [1/4] drm/i915: Allow i915 to manage the vma offset nodes instead of drm core

Commit Message

Abdiel Janulgue Nov. 19, 2019, 11:37 a.m. UTC
Add a CPU fault handler to service missing pages for shmem-backed objects.

v2: bail out of the PTE insertion loop on the first failed insert
v3: add i915_gem_object_has_struct_page() check
v4: Add self-test for validating CPU fault handler to ensure PTEs
    are revoked when an object is unbound.
v5: Add comment where PTEs are revoked (Chris)

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 129 ++++++++++++++----
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  48 ++++++-
 2 files changed, 145 insertions(+), 32 deletions(-)
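
For context, a minimal userspace sketch of the path this handler serves. It
assumes the MMAP_OFFSET uAPI introduced earlier in this series, with the
struct and flag names as they appear in the upstream headers; "fd", "handle"
and "size" are hypothetical and stand for an open DRM device and an object
from an earlier GEM_CREATE. Error handling is elided.

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

/* Illustrative only: map a GEM object with WC caching and fault a
 * page in by touching it.
 */
static void map_and_touch(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_WC,	/* served by i915_gem_fault_cpu() */
	};
	uint32_t *ptr;

	/* ask the kernel for a fake offset in the chosen mmap domain */
	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg);

	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, arg.offset);

	ptr[0] = 0xdeadbeef;	/* first access triggers the new fault handler */
}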

Comments

Chris Wilson Nov. 28, 2019, 11:08 a.m. UTC | #1
Quoting Abdiel Janulgue (2019-11-19 11:37:10)
> Add a CPU fault handler to service missing pages for shmem-backed objects.

Uh oh, the mmap() does remain valid past a gem_close() -- the
vm_area_struct is lacking a reference to the object?
-Chris
Chris Wilson Nov. 28, 2019, 11:22 a.m. UTC | #2
Quoting Chris Wilson (2019-11-28 11:08:35)
> Uh oh, the mmap() does remain valid past a gem_close() -- the
> vm_area_struct is lacking a reference to the object?

Hmm, test is misleading -- may just be a coherency bug.
-Chris
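
The sequence behind the concern above, as a hypothetical userspace fragment
continuing the earlier sketch (same includes; DRM_IOCTL_GEM_CLOSE is the
standard DRM handle-close ioctl). A mapping outliving the last handle is only
safe if the vma's open/close ops hold a reference on the object:

/* Sketch of the lifetime question: does the CPU mapping stay valid
 * once the last GEM handle is gone?
 */
static void close_then_touch(int fd, uint32_t handle, uint32_t *ptr)
{
	struct drm_gem_close close_arg = { .handle = handle };

	/* drop the last GEM handle; only the vma still refers to the object */
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);

	ptr[0] = 0xcafef00d;	/* must not be a use-after-free */
}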

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 3913634ab717..b892f645ca2d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -5,6 +5,7 @@ 
  */
 
 #include <linux/mman.h>
+#include <linux/pfn_t.h>
 #include <linux/sizes.h>
 
 #include "gt/intel_gt.h"
@@ -201,6 +202,71 @@  compute_partial_view(const struct drm_i915_gem_object *obj,
 	return view;
 }
 
+static vm_fault_t i915_error_to_vmf_fault(int err)
+{
+	switch (err) {
+	default:
+		WARN_ONCE(err, "unhandled error in %s: %i\n", __func__, err);
+		/* fallthrough */
+	case -EIO: /* shmemfs failure from swap device */
+	case -EFAULT: /* purged object */
+	case -ENODEV: /* bad object, how did you get here! */
+		return VM_FAULT_SIGBUS;
+
+	case -ENOSPC: /* shmemfs allocation failure */
+	case -ENOMEM: /* our allocation failure */
+		return VM_FAULT_OOM;
+
+	case 0:
+	case -EAGAIN:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		/*
+		 * EBUSY is ok: this just means that another thread
+		 * already did the job.
+		 */
+		return VM_FAULT_NOPAGE;
+	}
+}
+
+static vm_fault_t i915_gem_fault_cpu(struct vm_fault *vmf)
+{
+	struct vm_area_struct *area = vmf->vma;
+	struct i915_mmap_offset *priv = area->vm_private_data;
+	struct drm_i915_gem_object *obj = priv->obj;
+	vm_fault_t vmf_ret = VM_FAULT_NOPAGE;
+	unsigned long i, size = area->vm_end - area->vm_start;
+	bool write = area->vm_flags & VM_WRITE;
+	int ret;
+
+	if (!i915_gem_object_has_struct_page(obj))
+		return VM_FAULT_SIGBUS;
+
+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write)
+		return VM_FAULT_SIGBUS;
+
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		return i915_error_to_vmf_fault(ret);
+
+	/* PTEs are revoked in obj->ops->put_pages() */
+	for (i = 0; i < size >> PAGE_SHIFT; i++) {
+		struct page *page = i915_gem_object_get_page(obj, i);
+
+		vmf_ret = vmf_insert_pfn(area,
+					 area->vm_start + i * PAGE_SIZE,
+					 page_to_pfn(page));
+		if (vmf_ret != VM_FAULT_NOPAGE)
+			break;
+	}
+
+	i915_gem_object_unpin_pages(obj);
+
+	return vmf_ret;
+}
+
 /**
  * i915_gem_fault - fault a page into the GTT
  * @vmf: fault info
@@ -340,30 +406,7 @@  vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	intel_runtime_pm_put(rpm, wakeref);
 	i915_gem_object_unpin_pages(obj);
 err:
-	switch (ret) {
-	default:
-		WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
-		/* fallthrough */
-	case -EIO: /* shmemfs failure from swap device */
-	case -EFAULT: /* purged object */
-	case -ENODEV: /* bad object, how did you get here! */
-		return VM_FAULT_SIGBUS;
-
-	case -ENOSPC: /* shmemfs allocation failure */
-	case -ENOMEM: /* our allocation failure */
-		return VM_FAULT_OOM;
-
-	case 0:
-	case -EAGAIN:
-	case -ERESTARTSYS:
-	case -EINTR:
-	case -EBUSY:
-		/*
-		 * EBUSY is ok: this just means that another thread
-		 * already did the job.
-		 */
-		return VM_FAULT_NOPAGE;
-	}
+	return i915_error_to_vmf_fault(ret);
 }
 
 void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
@@ -647,6 +690,33 @@  static const struct vm_operations_struct i915_gem_gtt_vm_ops = {
 	.close = i915_gem_vm_close,
 };
 
+static const struct vm_operations_struct i915_gem_cpu_vm_ops = {
+	.fault = i915_gem_fault_cpu,
+	.open = i915_gem_vm_open,
+	.close = i915_gem_vm_close,
+};
+
+static void set_vmdata_mmap_offset(struct i915_mmap_offset *mmo, struct vm_area_struct *vma)
+{
+	switch (mmo->mmap_type) {
+	case I915_MMAP_TYPE_WC:
+		vma->vm_page_prot =
+			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+		break;
+	case I915_MMAP_TYPE_WB:
+		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+		break;
+	case I915_MMAP_TYPE_UC:
+		vma->vm_page_prot =
+			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+		break;
+	default:
+		break;
+	}
+
+	vma->vm_ops = &i915_gem_cpu_vm_ops;
+}
+
 /* This overcomes the limitation in drm_gem_mmap's assignment of a
  * drm_gem_object as the vma->vm_private_data. Since we need to
  * be able to resolve multiple mmap offsets which could be tied
@@ -714,7 +784,16 @@  int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
 	vma->vm_private_data = mmo;
 
-	vma->vm_ops = &i915_gem_gtt_vm_ops;
+	switch (mmo->mmap_type) {
+	case I915_MMAP_TYPE_WC:
+	case I915_MMAP_TYPE_WB:
+	case I915_MMAP_TYPE_UC:
+		set_vmdata_mmap_offset(mmo, vma);
+		break;
+	case I915_MMAP_TYPE_GTT:
+		vma->vm_ops = &i915_gem_gtt_vm_ops;
+		break;
+	}
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 6c2333c61469..1bedc9d115c5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -731,7 +731,7 @@  static int igt_mmap_offset_exhaustion(void *arg)
 }
 
 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
-static int igt_mmap_gtt(void *arg)
+static int igt_mmap(void *arg, enum i915_mmap_type type)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
@@ -762,7 +762,8 @@  static int igt_mmap_gtt(void *arg)
 	if (err)
 		goto out;
 
-	addr = igt_mmap_node(i915, &obj->base.vma_node,
+	mmo->mmap_type = type;
+	addr = igt_mmap_node(i915, &mmo->vma_node,
 			     0, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr)) {
 		err = addr;
@@ -778,8 +779,8 @@  static int igt_mmap_gtt(void *arg)
 		goto out_unmap;
 	}
 
-	if (area->vm_private_data != obj) {
-		pr_err("vm_area_struct did not point back to our object!\n");
+	if (area->vm_private_data != mmo) {
+		pr_err("vm_area_struct did not point back to our mmap_offset object!\n");
 		err = -EINVAL;
 		goto out_unmap;
 	}
@@ -830,6 +831,16 @@  static int igt_mmap_gtt(void *arg)
 	return err;
 }
 
+static int igt_mmap_gtt(void *arg)
+{
+	return igt_mmap(arg, I915_MMAP_TYPE_GTT);
+}
+
+static int igt_mmap_cpu(void *arg)
+{
+	return igt_mmap(arg, I915_MMAP_TYPE_WC);
+}
+
 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
 {
 	if (!pte_present(*pte) || pte_none(*pte)) {
@@ -882,7 +893,7 @@  static int prefault_range(u64 start, u64 len)
 	return __get_user(c, end - 1);
 }
 
-static int igt_mmap_gtt_revoke(void *arg)
+static int igt_mmap_revoke(void *arg, enum i915_mmap_type type)
 {
 	struct drm_i915_private *i915 = arg;
 	struct drm_i915_gem_object *obj;
@@ -902,7 +913,8 @@  static int igt_mmap_gtt_revoke(void *arg)
 	if (err)
 		goto out;
 
-	addr = igt_mmap_node(i915, &obj->base.vma_node,
+	mmo->mmap_type = type;
+	addr = igt_mmap_node(i915, &mmo->vma_node,
 			     0, PROT_WRITE, MAP_SHARED);
 	if (IS_ERR_VALUE(addr)) {
 		err = addr;
@@ -913,7 +925,8 @@  static int igt_mmap_gtt_revoke(void *arg)
 	if (err)
 		goto out_unmap;
 
-	GEM_BUG_ON(!atomic_read(&obj->bind_count));
+	GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
+		   !atomic_read(&obj->bind_count));
 
 	err = check_present(addr, obj->base.size);
 	if (err)
@@ -931,6 +944,15 @@  static int igt_mmap_gtt_revoke(void *arg)
 	}
 	GEM_BUG_ON(atomic_read(&obj->bind_count));
 
+	if (type != I915_MMAP_TYPE_GTT) {
+		__i915_gem_object_put_pages(obj);
+		if (i915_gem_object_has_pages(obj)) {
+			pr_err("Failed to put pages of object!\n");
+			err = -EINVAL;
+			goto out_unmap;
+		}
+	}
+
 	err = check_absent(addr, obj->base.size);
 	if (err)
 		goto out_unmap;
@@ -942,6 +964,16 @@  static int igt_mmap_gtt_revoke(void *arg)
 	return err;
 }
 
+static int igt_mmap_gtt_revoke(void *arg)
+{
+	return igt_mmap_revoke(arg, I915_MMAP_TYPE_GTT);
+}
+
+static int igt_mmap_cpu_revoke(void *arg)
+{
+	return igt_mmap_revoke(arg, I915_MMAP_TYPE_WC);
+}
+
 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
@@ -949,7 +981,9 @@  int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_smoke_tiling),
 		SUBTEST(igt_mmap_offset_exhaustion),
 		SUBTEST(igt_mmap_gtt),
+		SUBTEST(igt_mmap_cpu),
 		SUBTEST(igt_mmap_gtt_revoke),
+		SUBTEST(igt_mmap_cpu_revoke),
 	};
 
 	return i915_subtests(tests, i915);