
[v2,1/3] mm/hmm: make full use of walk_page_range()

Message ID 20191015204814.30099-2-rcampbell@nvidia.com (mailing list archive)
State Superseded
Series HMM tests and minor fixes

Commit Message

Ralph Campbell Oct. 15, 2019, 8:48 p.m. UTC
hmm_range_fault() calls find_vma() and walk_page_range() in a loop.
This is unnecessary duplication since walk_page_range() already calls
find_vma() in a loop.
Simplify hmm_range_fault() by defining a test_walk() callback function
to filter unhandled vmas.
This also fixes a bug where hmm_range_fault() was not checking
start >= vma->vm_start before checking vma->vm_flags, so hmm_range_fault()
could return an error based on the wrong vma for the requested range.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
---
 mm/hmm.c | 143 +++++++++++++++++++++++++++++++------------------------
 1 file changed, 80 insertions(+), 63 deletions(-)
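
For reference, a hedged sketch of the test_walk contract that this patch
relies on (simplified from the walk_page_range() loop of this era, not
verbatim kernel code): the walker only invokes test_walk once start is
inside the vma, which closes the wrong-vma window noted above, and the
callback's return value steers the walk.

	err = ops->test_walk(start, next, &walk);
	if (err > 0) {
		/* Positive: skip this vma and continue with the next one. */
		err = 0;
		continue;
	}
	if (err < 0)
		/* Negative: abort the walk and return the error. */
		break;
	/* Zero: walk this vma's page tables normally. */

This is why hmm_vma_walk_test() below returns 1 to skip read-protected
vmas, -EBUSY/-EFAULT to abort, and 0 to walk the vma.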

Comments

Jason Gunthorpe Oct. 21, 2019, 6:32 p.m. UTC | #1
On Tue, Oct 15, 2019 at 01:48:12PM -0700, Ralph Campbell wrote:

> +static bool hmm_range_needs_fault(unsigned long addr, unsigned long end,
> +				  const struct hmm_vma_walk *hmm_vma_walk)

This has a very similar name to hmm_range_need_fault(), and seems like
it does the same thing?

> +static int hmm_vma_walk_test(unsigned long start, unsigned long end,
> +			     struct mm_walk *walk)
> +{
> +	struct hmm_vma_walk *hmm_vma_walk = walk->private;
> +	struct hmm_range *range = hmm_vma_walk->range;
> +	struct vm_area_struct *vma = walk->vma;
> +
> +	/* If range is no longer valid, force retry. */
> +	if (!range->valid)
> +		return -EBUSY;
> +
> +	/*
> +	 * Skip vma ranges that don't have struct page backing them or
> +	 * map I/O devices directly.
> +	 */
> +	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
> +		return -EFAULT;
> +
> +	/*
> +	 * If the vma does not allow read access, then assume that it does not
> +	 * allow write access either. HMM does not support architectures
> +	 * that allow write without read.
> +	 */
> +	if (!(vma->vm_flags & VM_READ)) {
> +		/*
> +		 * Check to see if a fault is requested for any page in the
> +		 * range.
> +		 */
> +		if (hmm_range_needs_fault(start, end, hmm_vma_walk))
> +			return -EFAULT;

Is this change to call hmm_range_needs_fault another bug fix?

Jason
Ralph Campbell Oct. 21, 2019, 8:32 p.m. UTC | #2
On 10/21/19 11:32 AM, Jason Gunthorpe wrote:
> On Tue, Oct 15, 2019 at 01:48:12PM -0700, Ralph Campbell wrote:
> 
>> +static bool hmm_range_needs_fault(unsigned long addr, unsigned long end,
>> +				  const struct hmm_vma_walk *hmm_vma_walk)
> 
> This has a very similar name to hmm_range_need_fault(), and seems like
> it does the same thing?

The two functions are very similar but not identical.
I guess I could resolve the differences and use one function.

>> +static int hmm_vma_walk_test(unsigned long start, unsigned long end,
>> +			     struct mm_walk *walk)
>> +{
>> +	struct hmm_vma_walk *hmm_vma_walk = walk->private;
>> +	struct hmm_range *range = hmm_vma_walk->range;
>> +	struct vm_area_struct *vma = walk->vma;
>> +
>> +	/* If range is no longer valid, force retry. */
>> +	if (!range->valid)
>> +		return -EBUSY;
>> +
>> +	/*
>> +	 * Skip vma ranges that don't have struct page backing them or
>> +	 * map I/O devices directly.
>> +	 */
>> +	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
>> +		return -EFAULT;
>> +
>> +	/*
>> +	 * If the vma does not allow read access, then assume that it does not
>> +	 * allow write access either. HMM does not support architectures
>> +	 * that allow write without read.
>> +	 */
>> +	if (!(vma->vm_flags & VM_READ)) {
>> +		/*
>> +		 * Check to see if a fault is requested for any page in the
>> +		 * range.
>> +		 */
>> +		if (hmm_range_needs_fault(start, end, hmm_vma_walk))
>> +			return -EFAULT;
> 
> Is this change to call hmm_range_needs_fault another bug fix?
> 
> Jason

Yes. If HMM_FAULT_SNAPSHOT is specified, there shouldn't be any
error return code. If it is not specified, then the range->pfns[] array,
on input, holds flags indicating which pages the driver wants populated
if they are not already present. hmm_range_needs_fault() checks
for this and hmm_vma_walk_test() returns -EFAULT.

I guess I could include this in the change log.
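
As a hedged illustration of those two modes (driver-side sketch only; the
loop and the choice of request flag are illustrative, not part of this
patch):

	/* Snapshot mode: report what is already present; never fault. */
	ret = hmm_range_fault(range, HMM_FAULT_SNAPSHOT);

	/* Fault mode: input pfns[] carries per-page "populate this" flags. */
	for (i = 0; i < (range->end - range->start) >> PAGE_SHIFT; i++)
		range->pfns[i] = range->flags[HMM_PFN_VALID];
	ret = hmm_range_fault(range, 0);
	/* May return -EFAULT if a requested page lies in a !VM_READ vma. */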

Patch

diff --git a/mm/hmm.c b/mm/hmm.c
index 902f5fa6bf93..5df0dbf77e89 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -252,18 +252,15 @@  static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
 	return -EFAULT;
 }
 
-static int hmm_pfns_bad(unsigned long addr,
-			unsigned long end,
-			struct mm_walk *walk)
+static int hmm_pfns_fill(unsigned long addr, unsigned long end,
+		struct hmm_range *range, enum hmm_pfn_value_e value)
 {
-	struct hmm_vma_walk *hmm_vma_walk = walk->private;
-	struct hmm_range *range = hmm_vma_walk->range;
 	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	i = (addr - range->start) >> PAGE_SHIFT;
 	for (; addr < end; addr += PAGE_SIZE, i++)
-		pfns[i] = range->values[HMM_PFN_ERROR];
+		pfns[i] = range->values[value];
 
 	return 0;
 }
@@ -584,7 +581,7 @@  static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		}
 		return 0;
 	} else if (!pmd_present(pmd))
-		return hmm_pfns_bad(start, end, walk);
+		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
@@ -612,7 +609,7 @@  static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	 * recover.
 	 */
 	if (pmd_bad(pmd))
-		return hmm_pfns_bad(start, end, walk);
+		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 
 	ptep = pte_offset_map(pmdp, addr);
 	i = (addr - range->start) >> PAGE_SHIFT;
@@ -770,13 +767,68 @@  static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #define hmm_vma_walk_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static void hmm_pfns_clear(struct hmm_range *range,
-			   uint64_t *pfns,
-			   unsigned long addr,
-			   unsigned long end)
+static bool hmm_range_needs_fault(unsigned long addr, unsigned long end,
+				  const struct hmm_vma_walk *hmm_vma_walk)
 {
-	for (; addr < end; addr += PAGE_SIZE, pfns++)
-		*pfns = range->values[HMM_PFN_NONE];
+	bool fault, write_fault;
+	unsigned long i;
+	uint64_t *pfns;
+
+	if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
+		return false;
+
+	fault = false;
+	write_fault = false;
+	pfns = hmm_vma_walk->range->pfns;
+	i = (addr - hmm_vma_walk->range->start) >> PAGE_SHIFT;
+	for (; addr < end; addr += PAGE_SIZE, i++) {
+		hmm_pte_need_fault(hmm_vma_walk, pfns[i], 0,
+				   &fault, &write_fault);
+		if (fault || write_fault)
+			return true;
+	}
+	return false;
+}
+
+static int hmm_vma_walk_test(unsigned long start, unsigned long end,
+			     struct mm_walk *walk)
+{
+	struct hmm_vma_walk *hmm_vma_walk = walk->private;
+	struct hmm_range *range = hmm_vma_walk->range;
+	struct vm_area_struct *vma = walk->vma;
+
+	/* If range is no longer valid, force retry. */
+	if (!range->valid)
+		return -EBUSY;
+
+	/*
+	 * Skip vma ranges that don't have struct page backing them or
+	 * map I/O devices directly.
+	 */
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
+		return -EFAULT;
+
+	/*
+	 * If the vma does not allow read access, then assume that it does not
+	 * allow write access either. HMM does not support architectures
+	 * that allow write without read.
+	 */
+	if (!(vma->vm_flags & VM_READ)) {
+		/*
+		 * Check to see if a fault is requested for any page in the
+		 * range.
+		 */
+		if (hmm_range_needs_fault(start, end, hmm_vma_walk))
+			return -EFAULT;
+
+		hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+		hmm_vma_walk->last = end;
+
+		/* Skip this vma and continue processing the next vma. */
+		return 1;
+	}
+
+	return 0;
 }
 
 /*
@@ -857,6 +909,7 @@  static const struct mm_walk_ops hmm_walk_ops = {
 	.pmd_entry	= hmm_vma_walk_pmd,
 	.pte_hole	= hmm_vma_walk_hole,
 	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
+	.test_walk	= hmm_vma_walk_test,
 };
 
 /**
@@ -889,63 +942,27 @@  static const struct mm_walk_ops hmm_walk_ops = {
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
-	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
-	unsigned long start = range->start, end;
-	struct hmm_vma_walk hmm_vma_walk;
+	unsigned long start = range->start;
+	struct hmm_vma_walk hmm_vma_walk = {
+		.range = range,
+		.last = start,
+		.flags = flags,
+	};
 	struct hmm *hmm = range->hmm;
-	struct vm_area_struct *vma;
 	int ret;
 
 	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
 
 	do {
-		/* If range is no longer valid force retry. */
-		if (!range->valid)
-			return -EBUSY;
+		ret = walk_page_range(hmm->mmu_notifier.mm, start, range->end,
+				      &hmm_walk_ops, &hmm_vma_walk);
+		start = hmm_vma_walk.last;
 
-		vma = find_vma(hmm->mmu_notifier.mm, start);
-		if (vma == NULL || (vma->vm_flags & device_vma))
-			return -EFAULT;
+		/* Keep trying while the range is valid. */
+	} while (ret == -EBUSY && range->valid);
 
-		if (!(vma->vm_flags & VM_READ)) {
-			/*
-			 * If vma do not allow read access, then assume that it
-			 * does not allow write access, either. HMM does not
-			 * support architecture that allow write without read.
-			 */
-			hmm_pfns_clear(range, range->pfns,
-				range->start, range->end);
-			return -EPERM;
-		}
-
-		hmm_vma_walk.pgmap = NULL;
-		hmm_vma_walk.last = start;
-		hmm_vma_walk.flags = flags;
-		hmm_vma_walk.range = range;
-		end = min(range->end, vma->vm_end);
-
-		walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,
-				&hmm_vma_walk);
-
-		do {
-			ret = walk_page_range(vma->vm_mm, start, end,
-					&hmm_walk_ops, &hmm_vma_walk);
-			start = hmm_vma_walk.last;
-
-			/* Keep trying while the range is valid. */
-		} while (ret == -EBUSY && range->valid);
-
-		if (ret) {
-			unsigned long i;
-
-			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
-			hmm_pfns_clear(range, &range->pfns[i],
-				hmm_vma_walk.last, range->end);
-			return ret;
-		}
-		start = end;
-
-	} while (start < range->end);
+	if (ret)
+		return ret;
 
 	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }