
[v2,09/28] gup: Turn hpage_pincount_add() into page_pincount_add()

Message ID 20220110042406.499429-10-willy@infradead.org (mailing list archive)
State New
Series Convert GUP to folios

Commit Message

Matthew Wilcox (Oracle) Jan. 10, 2022, 4:23 a.m. UTC
Simplify try_grab_compound_head() and remove an unnecessary
VM_BUG_ON by handling pages both with and without a pincount field in
page_pincount_add().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/gup.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)
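
For context on the accounting being consolidated here: when a page has no dedicated pincount field, GUP encodes pins in the ordinary refcount by adding GUP_PIN_COUNTING_BIAS per pin. A minimal standalone sketch of that bookkeeping, assuming the bias value of (1U << 10) from mm.h of this era, shows why page_pincount_add() adds refs * (GUP_PIN_COUNTING_BIAS - 1) rather than the full bias: try_get_compound_head() has already contributed refs to the refcount.

#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS (1U << 10)	/* assumed value, matches mm.h of this era */

int main(void)
{
	unsigned int refcount = 1;	/* page already holds one base reference */
	int refs = 3;			/* pins being taken */

	/* try_get_compound_head() has already added @refs ... */
	refcount += refs;

	/* ... so page_pincount_add() only needs the remainder of the bias. */
	refcount += refs * (GUP_PIN_COUNTING_BIAS - 1);

	/* Net effect: refs * GUP_PIN_COUNTING_BIAS on top of the base ref. */
	printf("%u\n", refcount);	/* prints 3073 == 1 + 3 * 1024 */
	return 0;
}
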

Comments

Christoph Hellwig Jan. 10, 2022, 8:31 a.m. UTC | #1
On Mon, Jan 10, 2022 at 04:23:47AM +0000, Matthew Wilcox (Oracle) wrote:
> Simplify try_grab_compound_head() and remove an unnecessary
> VM_BUG_ON by handling pages both with and without a pincount field in
> page_pincount_add().

Nice:

Reviewed-by: Christoph Hellwig <hch@lst.de>

John Hubbard Jan. 11, 2022, 3:35 a.m. UTC | #2
On 1/9/22 20:23, Matthew Wilcox (Oracle) wrote:
> Simplify try_grab_compound_head() and remove an unnecessary
> VM_BUG_ON by handling pages both with and without a pincount field in
> page_pincount_add().
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   mm/gup.c | 33 +++++++++++++++------------------
>   1 file changed, 15 insertions(+), 18 deletions(-)

Reviewed-by: John Hubbard <jhubbard@nvidia.com>


thanks,

John Hubbard Jan. 11, 2022, 4:32 a.m. UTC | #3
On 1/9/22 20:23, Matthew Wilcox (Oracle) wrote:
...
> diff --git a/mm/gup.c b/mm/gup.c
> index dbb1b54d0def..3ed9907f3c8d 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -29,12 +29,23 @@ struct follow_page_context {
>   	unsigned int page_mask;
>   };
>   
> -static void hpage_pincount_add(struct page *page, int refs)
> +/*
> + * When pinning a compound page of order > 1 (which is what
> + * hpage_pincount_available() checks for), use an exact count to track
> + * it, via page_pincount_add/_sub().
> + *
> + * However, be sure to *also* increment the normal page refcount field
> + * at least once, so that the page really is pinned.  That's why the
> + * refcount from the earlier try_get_compound_head() is left intact.
> + */

I just realized, after looking at this again in a later patch, that the
last paragraph, above, is now misplaced. It refers to the behavior of the
caller, not to this routine. So it needs to be airlifted back to the
caller.

> +static void page_pincount_add(struct page *page, int refs)
>   {
> -	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
>   	VM_BUG_ON_PAGE(page != compound_head(page), page);
>   
> -	atomic_add(refs, compound_pincount_ptr(page));
> +	if (hpage_pincount_available(page))
> +		atomic_add(refs, compound_pincount_ptr(page));
> +	else
> +		page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
>   }
>   
>   static void hpage_pincount_sub(struct page *page, int refs)
> @@ -150,21 +161,7 @@ struct page *try_grab_compound_head(struct page *page,
>   		if (!page)
>   			return NULL;
>   
> -		/*
> -		 * When pinning a compound page of order > 1 (which is what
> -		 * hpage_pincount_available() checks for), use an exact count to
> -		 * track it, via hpage_pincount_add/_sub().
> -		 *
> -		 * However, be sure to *also* increment the normal page refcount
> -		 * field at least once, so that the page really is pinned.
> -		 * That's why the refcount from the earlier
> -		 * try_get_compound_head() is left intact.
> -		 */

...here.

> -		if (hpage_pincount_available(page))
> -			hpage_pincount_add(page, refs);
> -		else
> -			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
> -
> +		page_pincount_add(page, refs);
>   		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
>   				    refs);
>   

thanks,

Matthew Wilcox (Oracle) Jan. 11, 2022, 1:46 p.m. UTC | #4
On Mon, Jan 10, 2022 at 08:32:20PM -0800, John Hubbard wrote:
> On 1/9/22 20:23, Matthew Wilcox (Oracle) wrote:
> ...
> > diff --git a/mm/gup.c b/mm/gup.c
> > index dbb1b54d0def..3ed9907f3c8d 100644
> > --- a/mm/gup.c
> > +++ b/mm/gup.c
> > @@ -29,12 +29,23 @@ struct follow_page_context {
> >   	unsigned int page_mask;
> >   };
> > -static void hpage_pincount_add(struct page *page, int refs)
> > +/*
> > + * When pinning a compound page of order > 1 (which is what
> > + * hpage_pincount_available() checks for), use an exact count to track
> > + * it, via page_pincount_add/_sub().
> > + *
> > + * However, be sure to *also* increment the normal page refcount field
> > + * at least once, so that the page really is pinned.  That's why the
> > + * refcount from the earlier try_get_compound_head() is left intact.
> > + */
> 
> I just realized, after looking at this again in a later patch, that the
> last paragraph, above, is now misplaced. It refers to the behavior of the
> caller, not to this routine. So it needs to be airlifted back to the
> caller.

I really do think it fits better here.  The thing is there's just one
caller, so it's a little hard to decide what "all callers" need when
there's only one.  Maybe I can wordsmith this a bit to read better.

> > +static void page_pincount_add(struct page *page, int refs)
> >   {
> > -	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
> >   	VM_BUG_ON_PAGE(page != compound_head(page), page);
> > -	atomic_add(refs, compound_pincount_ptr(page));
> > +	if (hpage_pincount_available(page))
> > +		atomic_add(refs, compound_pincount_ptr(page));
> > +	else
> > +		page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
> >   }
> >   static void hpage_pincount_sub(struct page *page, int refs)
> > @@ -150,21 +161,7 @@ struct page *try_grab_compound_head(struct page *page,
> >   		if (!page)
> >   			return NULL;
> > -		/*
> > -		 * When pinning a compound page of order > 1 (which is what
> > -		 * hpage_pincount_available() checks for), use an exact count to
> > -		 * track it, via hpage_pincount_add/_sub().
> > -		 *
> > -		 * However, be sure to *also* increment the normal page refcount
> > -		 * field at least once, so that the page really is pinned.
> > -		 * That's why the refcount from the earlier
> > -		 * try_get_compound_head() is left intact.
> > -		 */
> 
> ...here.
> 
> > -		if (hpage_pincount_available(page))
> > -			hpage_pincount_add(page, refs);
> > -		else
> > -			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
> > -
> > +		page_pincount_add(page, refs);
> >   		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
> >   				    refs);
> 
> thanks,
> -- 
> John Hubbard
> NVIDIA

Patch

diff --git a/mm/gup.c b/mm/gup.c
index dbb1b54d0def..3ed9907f3c8d 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -29,12 +29,23 @@  struct follow_page_context {
 	unsigned int page_mask;
 };
 
-static void hpage_pincount_add(struct page *page, int refs)
+/*
+ * When pinning a compound page of order > 1 (which is what
+ * hpage_pincount_available() checks for), use an exact count to track
+ * it, via page_pincount_add/_sub().
+ *
+ * However, be sure to *also* increment the normal page refcount field
+ * at least once, so that the page really is pinned.  That's why the
+ * refcount from the earlier try_get_compound_head() is left intact.
+ */
+static void page_pincount_add(struct page *page, int refs)
 {
-	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
 	VM_BUG_ON_PAGE(page != compound_head(page), page);
 
-	atomic_add(refs, compound_pincount_ptr(page));
+	if (hpage_pincount_available(page))
+		atomic_add(refs, compound_pincount_ptr(page));
+	else
+		page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
 }
 
 static void hpage_pincount_sub(struct page *page, int refs)
@@ -150,21 +161,7 @@  struct page *try_grab_compound_head(struct page *page,
 		if (!page)
 			return NULL;
 
-		/*
-		 * When pinning a compound page of order > 1 (which is what
-		 * hpage_pincount_available() checks for), use an exact count to
-		 * track it, via hpage_pincount_add/_sub().
-		 *
-		 * However, be sure to *also* increment the normal page refcount
-		 * field at least once, so that the page really is pinned.
-		 * That's why the refcount from the earlier
-		 * try_get_compound_head() is left intact.
-		 */
-		if (hpage_pincount_available(page))
-			hpage_pincount_add(page, refs);
-		else
-			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
-
+		page_pincount_add(page, refs);
 		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
 				    refs);
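
As a footnote for readers following the comment's reference to "order > 1": the check it names distinguishes compound pages large enough to carry an exact pincount in a tail page. A sketch of that helper, reproduced from memory of mm.h in this era (treat the exact body as an assumption rather than the authoritative source):

static inline bool hpage_pincount_available(struct page *page)
{
	/*
	 * The exact pincount lives in a tail page of the compound page, so
	 * order-0 and order-1 pages have nowhere to store it and fall back
	 * to the GUP_PIN_COUNTING_BIAS scheme in the ordinary refcount.
	 */
	page = compound_head(page);
	return PageCompound(page) && compound_order(page) > 1;
}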