
[01/11] mm: page_vma_mapped_walk(): use page for pvmw->page

Message ID 88e67645-f467-c279-bf5e-af4b5c6b13eb@google.com (mailing list archive)
State New, archived
Series mm: page_vma_mapped_walk() cleanup and THP fixes

Commit Message

Hugh Dickins June 10, 2021, 6:34 a.m. UTC
page_vma_mapped_walk() cleanup: sometimes the local copy of pvmw->page was
used, sometimes pvmw->page itself: use the local copy "page" throughout.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: <stable@vger.kernel.org>
---
 mm/page_vma_mapped.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
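
For context, the change amounts to the following pattern. This is a minimal,
hypothetical sketch (struct walk_state, struct item and the field names are
made up for illustration; it is not the real mm/page_vma_mapped.c code):

#include <stdbool.h>
#include <stdio.h>

struct item { unsigned long size; };
struct walk_state { struct item *page; unsigned long address; };

static bool walk(struct walk_state *w)
{
        struct item *page = w->page;    /* local copy, taken once at the top */

        /* before the cleanup: if (w->page->size > 1) ...  -- re-reads the field */
        /* after the cleanup: the same test uses the local copy taken above      */
        if (page->size > 1)
                w->address += page->size;

        return w->address != 0;
}

int main(void)
{
        struct item it = { .size = 4096 };
        struct walk_state w = { .page = &it, .address = 0 };

        printf("advanced to %lu\n", walk(&w) ? w.address : 0UL);
        return 0;
}

Besides readability, consistent use of the one local copy means fewer
pvmw->page dereferences sprinkled through the walker.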

Comments

Alistair Popple June 10, 2021, 8:12 a.m. UTC | #1
Thanks for this. I've read through page_vma_mapped_walk() a few too many times
recently, and it annoyed me enough to start writing some cleanup patches of my
own, but I'm happy to see this instead.

I confirmed pvmw->page isn't modified here or in map_pte()/check_pte(), so the
patch looks good to me:

Reviewed-by: Alistair Popple <apopple@nvidia.com>
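
The property confirmed above -- that nothing reassigns pvmw->page during the
walk -- is what makes the substitution safe. A minimal, hypothetical sketch of
that condition (check() merely stands in for map_pte()/check_pte(); none of
these names are the real kernel code):

#include <stdio.h>

struct item { int id; };
struct walk_state { struct item *page; };

/* Stand-in helper: reads w->page but never reassigns it. */
static void check(struct walk_state *w)
{
        printf("checking item %d\n", w->page->id);
}

static void walk(struct walk_state *w)
{
        struct item *page = w->page;    /* local copy taken once */

        check(w);       /* safe only because check() never writes w->page */
        printf("still item %d\n", page->id);    /* same object as w->page */
}

int main(void)
{
        struct item it = { .id = 1 };
        struct walk_state w = { .page = &it };

        walk(&w);
        return 0;
}

If a helper ever did reassign the field mid-walk, the cached local copy would
silently go stale, which is exactly what the review comment rules out.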

On Thursday, 10 June 2021 4:34:40 PM AEST Hugh Dickins wrote:
> page_vma_mapped_walk() cleanup: sometimes the local copy of pvmw->page was
> used, sometimes pvmw->page itself: use the local copy "page" throughout.
> 
> Signed-off-by: Hugh Dickins <hughd@google.com>
> Cc: <stable@vger.kernel.org>
> ---
>  mm/page_vma_mapped.c | 9 ++++-----
>  1 file changed, 4 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index e37bd43904af..a6dbf714ca15 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -156,7 +156,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>         if (pvmw->pte)
>                 goto next_pte;
> 
> -       if (unlikely(PageHuge(pvmw->page))) {
> +       if (unlikely(PageHuge(page))) {
>                 /* when pud is not present, pte will be NULL */
>                 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
>                 if (!pvmw->pte)
> @@ -217,8 +217,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>                  * cannot return prematurely, while zap_huge_pmd() has
>                  * cleared *pmd but not decremented compound_mapcount().
>                  */
> -               if ((pvmw->flags & PVMW_SYNC) &&
> -                   PageTransCompound(pvmw->page)) {
> +               if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
>                         spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
> 
>                         spin_unlock(ptl);
> @@ -234,9 +233,9 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>                         return true;
>  next_pte:
>                 /* Seek to next pte only makes sense for THP */
> -               if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
> +               if (!PageTransHuge(page) || PageHuge(page))
>                         return not_found(pvmw);
> -               end = vma_address_end(pvmw->page, pvmw->vma);
> +               end = vma_address_end(page, pvmw->vma);
>                 do {
>                         pvmw->address += PAGE_SIZE;
>                         if (pvmw->address >= end)
> --
> 2.26.2
>
Kirill A. Shutemov June 10, 2021, 8:55 a.m. UTC | #2
On Wed, Jun 09, 2021 at 11:34:40PM -0700, Hugh Dickins wrote:
> page_vma_mapped_walk() cleanup: sometimes the local copy of pvmw->page was
> used, sometimes pvmw->page itself: use the local copy "page" throughout.
> 
> Signed-off-by: Hugh Dickins <hughd@google.com>
> Cc: <stable@vger.kernel.org>

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

A question below.

> ---
>  mm/page_vma_mapped.c | 9 ++++-----
>  1 file changed, 4 insertions(+), 5 deletions(-)
> 
> diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
> index e37bd43904af..a6dbf714ca15 100644
> --- a/mm/page_vma_mapped.c
> +++ b/mm/page_vma_mapped.c
> @@ -156,7 +156,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  	if (pvmw->pte)
>  		goto next_pte;
>  
> -	if (unlikely(PageHuge(pvmw->page))) {
> +	if (unlikely(PageHuge(page))) {
>  		/* when pud is not present, pte will be NULL */
>  		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
>  		if (!pvmw->pte)
> @@ -217,8 +217,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  		 * cannot return prematurely, while zap_huge_pmd() has
>  		 * cleared *pmd but not decremented compound_mapcount().
>  		 */
> -		if ((pvmw->flags & PVMW_SYNC) &&
> -		    PageTransCompound(pvmw->page)) {
> +		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
>  			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
>  
>  			spin_unlock(ptl);
> @@ -234,9 +233,9 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
>  			return true;
>  next_pte:
>  		/* Seek to next pte only makes sense for THP */
> -		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
> +		if (!PageTransHuge(page) || PageHuge(page))
>  			return not_found(pvmw);
> -		end = vma_address_end(pvmw->page, pvmw->vma);
> +		end = vma_address_end(page, pvmw->vma);
>  		do {
>  			pvmw->address += PAGE_SIZE;
>  			if (pvmw->address >= end)

I see two more uses of pvmw->page in this loop. Are you leaving them here
because the code will be rewritten later in the patchset?
Peter Xu June 10, 2021, 2:14 p.m. UTC | #3
On Thu, Jun 10, 2021 at 11:55:22AM +0300, Kirill A. Shutemov wrote:
> > @@ -234,9 +233,9 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
> >  			return true;
> >  next_pte:
> >  		/* Seek to next pte only makes sense for THP */
> > -		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
> > +		if (!PageTransHuge(page) || PageHuge(page))
> >  			return not_found(pvmw);
> > -		end = vma_address_end(pvmw->page, pvmw->vma);
> > +		end = vma_address_end(page, pvmw->vma);
> >  		do {
> >  			pvmw->address += PAGE_SIZE;
> >  			if (pvmw->address >= end)
> 
> I see two more pvmw->page in this loop. Do you leave them here as the code
> will be rewritten later in the patchset?

I think those were already removed by the previous series ("[PATCH v2 04/10]
mm/thp: fix vma_address() if virtual address below file offset").

Reviewed-by: Peter Xu <peterx@redhat.com>

Patch

diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index e37bd43904af..a6dbf714ca15 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -156,7 +156,7 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 	if (pvmw->pte)
 		goto next_pte;
 
-	if (unlikely(PageHuge(pvmw->page))) {
+	if (unlikely(PageHuge(page))) {
 		/* when pud is not present, pte will be NULL */
 		pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
 		if (!pvmw->pte)
@@ -217,8 +217,7 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 		 * cannot return prematurely, while zap_huge_pmd() has
 		 * cleared *pmd but not decremented compound_mapcount().
 		 */
-		if ((pvmw->flags & PVMW_SYNC) &&
-		    PageTransCompound(pvmw->page)) {
+		if ((pvmw->flags & PVMW_SYNC) && PageTransCompound(page)) {
 			spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
 
 			spin_unlock(ptl);
@@ -234,9 +233,9 @@  bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
 			return true;
 next_pte:
 		/* Seek to next pte only makes sense for THP */
-		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
+		if (!PageTransHuge(page) || PageHuge(page))
 			return not_found(pvmw);
-		end = vma_address_end(pvmw->page, pvmw->vma);
+		end = vma_address_end(page, pvmw->vma);
 		do {
 			pvmw->address += PAGE_SIZE;
 			if (pvmw->address >= end)