| Message ID | 20210607075855.5084-7-apopple@nvidia.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Add support for SVM atomics in Nouveau |
On Mon, Jun 07, 2021 at 05:58:51PM +1000, Alistair Popple wrote:

```
> Currently if copy_nonpresent_pte() returns a non-zero value it is
> assumed to be a swap entry which requires further processing outside the
> loop in copy_pte_range() after dropping locks. This prevents other
> values being returned to signal conditions such as failure which a
> subsequent change requires.
>
> Instead make copy_nonpresent_pte() return an error code if further
> processing is required and read the value for the swap entry in the main
> loop under the ptl.
>
> Signed-off-by: Alistair Popple <apopple@nvidia.com>
>
> ---
>
> v10:
>
> Use a unique error code and only check return codes for handling.
>
> v9:
>
> New for v9 to allow device exclusive handling to occur in
> copy_nonpresent_pte().
> ---
>  mm/memory.c | 26 ++++++++++++++++----------
>  1 file changed, 16 insertions(+), 10 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index 2fb455c365c2..0982cab37ecb 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -718,7 +718,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
>
>  	if (likely(!non_swap_entry(entry))) {
>  		if (swap_duplicate(entry) < 0)
> -			return entry.val;
> +			return -EIO;
>
>  		/* make sure dst_mm is on swapoff's mmlist. */
>  		if (unlikely(list_empty(&dst_mm->mmlist))) {
> @@ -974,11 +974,13 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>  			continue;
>  		}
>  		if (unlikely(!pte_present(*src_pte))) {
> -			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
> -							dst_pte, src_pte,
> -							src_vma, addr, rss);
> -			if (entry.val)
> +			ret = copy_nonpresent_pte(dst_mm, src_mm,
> +						  dst_pte, src_pte,
> +						  src_vma, addr, rss);
> +			if (ret == -EIO) {
> +				entry = pte_to_swp_entry(*src_pte);
>  				break;
> +			}
>  			progress += 8;
>  			continue;
>  		}
> @@ -1011,20 +1013,24 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
>  	pte_unmap_unlock(orig_dst_pte, dst_ptl);
>  	cond_resched();
>
> -	if (entry.val) {
> +	if (ret == -EIO) {
> +		VM_WARN_ON_ONCE(!entry.val);
>  		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
>  			ret = -ENOMEM;
>  			goto out;
>  		}
>  		entry.val = 0;
> -	} else if (ret) {
> -		WARN_ON_ONCE(ret != -EAGAIN);
> +	} else if (ret == -EAGAIN) {

            ^
            |----------------------------- one more space here

>  		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
>  		if (!prealloc)
>  			return -ENOMEM;
> -		/* We've captured and resolved the error. Reset, try again. */
> -		ret = 0;
> +	} else if (ret) {
> +		VM_WARN_ON_ONCE(1);
>  	}
> +
> +	/* We've captured and resolved the error. Reset, try again. */

Maybe better as:

	/*
	 * We've resolved all error even if there is, reset error code and try
	 * again if necessary.
	 */

as it also covers the no-error path.  But I guess not a big deal..

Reviewed-by: Peter Xu <peterx@redhat.com>

Thanks,

> +	ret = 0;
> +
>  	if (addr != end)
>  		goto again;
>  out:
> --
> 2.20.1
>
```
```diff
diff --git a/mm/memory.c b/mm/memory.c
index 2fb455c365c2..0982cab37ecb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -718,7 +718,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 
 	if (likely(!non_swap_entry(entry))) {
 		if (swap_duplicate(entry) < 0)
-			return entry.val;
+			return -EIO;
 
 		/* make sure dst_mm is on swapoff's mmlist. */
 		if (unlikely(list_empty(&dst_mm->mmlist))) {
@@ -974,11 +974,13 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 			continue;
 		}
 		if (unlikely(!pte_present(*src_pte))) {
-			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
-							dst_pte, src_pte,
-							src_vma, addr, rss);
-			if (entry.val)
+			ret = copy_nonpresent_pte(dst_mm, src_mm,
+						  dst_pte, src_pte,
+						  src_vma, addr, rss);
+			if (ret == -EIO) {
+				entry = pte_to_swp_entry(*src_pte);
 				break;
+			}
 			progress += 8;
 			continue;
 		}
@@ -1011,20 +1013,24 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
 	cond_resched();
 
-	if (entry.val) {
+	if (ret == -EIO) {
+		VM_WARN_ON_ONCE(!entry.val);
 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
 			ret = -ENOMEM;
 			goto out;
 		}
 		entry.val = 0;
-	} else if (ret) {
-		WARN_ON_ONCE(ret != -EAGAIN);
+	} else if (ret == -EAGAIN) {
 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
 		if (!prealloc)
 			return -ENOMEM;
-		/* We've captured and resolved the error. Reset, try again. */
-		ret = 0;
+	} else if (ret) {
+		VM_WARN_ON_ONCE(1);
 	}
+
+	/* We've captured and resolved the error. Reset, try again. */
+	ret = 0;
+
 	if (addr != end)
 		goto again;
 out:
```
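For readers less familiar with copy_pte_range(), the post-unlock dispatch the patch introduces can be restated in isolation. The following is a minimal userspace C sketch of that three-way handling; resolve_copy_error() is a hypothetical stand-in for the logic that runs after pte_unmap_unlock(), not a kernel function:

```c
#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical userspace analogue of the dispatch copy_pte_range()
 * performs after dropping the page-table locks with this patch applied:
 * -EIO means a swap-count continuation is needed (the entry was already
 * read under the ptl), -EAGAIN means a destination page must be
 * preallocated, and any other non-zero code is unexpected.
 */
static int resolve_copy_error(int ret, unsigned long entry_val)
{
	if (ret == -EIO) {
		/* Swap entry captured under the ptl; extend its count here. */
		printf("add swap count continuation for entry %#lx\n", entry_val);
	} else if (ret == -EAGAIN) {
		/* Copy-on-write path ran out of pages; preallocate one. */
		printf("preallocate a destination page\n");
	} else if (ret) {
		/* Mirrors the VM_WARN_ON_ONCE(1) catch-all in the patch. */
		fprintf(stderr, "unexpected return code %d\n", ret);
	}
	/* All recoverable errors resolved; reset and retry the loop. */
	return 0;
}

int main(void)
{
	resolve_copy_error(-EIO, 0x3c00);
	resolve_copy_error(-EAGAIN, 0);
	resolve_copy_error(0, 0);
	return 0;
}
```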
Currently if copy_nonpresent_pte() returns a non-zero value it is assumed to be a swap entry which requires further processing outside the loop in copy_pte_range() after dropping locks. This prevents other values being returned to signal conditions such as failure which a subsequent change requires.

Instead make copy_nonpresent_pte() return an error code if further processing is required and read the value for the swap entry in the main loop under the ptl.

Signed-off-by: Alistair Popple <apopple@nvidia.com>

```
---

v10:

Use a unique error code and only check return codes for handling.

v9:

New for v9 to allow device exclusive handling to occur in
copy_nonpresent_pte().
---
 mm/memory.c | 26 ++++++++++++++++----------
 1 file changed, 16 insertions(+), 10 deletions(-)
```
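The interface change the commit message describes, returning a negative errno instead of the raw swap-entry value and reading the entry while the lock is still held, can be illustrated with a small userspace analogue. Everything here (copy_nonpresent(), fake_src_pte, a pthread mutex standing in for the ptl) is hypothetical scaffolding, not kernel code:

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the ptl */
static unsigned long fake_src_pte = 0xabcd;             /* stand-in for *src_pte */

/*
 * After the patch, failure is signalled with an error code rather than
 * by encoding the swap-entry value in the return channel, which frees
 * other return values to mean other things (e.g. -EAGAIN).
 */
static int copy_nonpresent(int simulate_failure)
{
	if (simulate_failure)
		return -EIO;
	return 0;
}

int main(void)
{
	unsigned long entry = 0;
	int ret;

	pthread_mutex_lock(&ptl);
	ret = copy_nonpresent(1);
	if (ret == -EIO)
		entry = fake_src_pte; /* read the entry while the lock is held */
	pthread_mutex_unlock(&ptl);

	if (ret == -EIO)
		printf("handle swap entry %#lx outside the lock\n", entry);
	return 0;
}
```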