[1/2] migrate: convert unmap_and_move() to use folios

Message ID: 20221104083020.155835-2-ying.huang@intel.com (mailing list archive)
State: New
Series: migrate: convert migrate_pages()/unmap_and_move() to use folios

Commit Message

Huang, Ying Nov. 4, 2022, 8:30 a.m. UTC
Quite straightforward: the page functions are converted to the
corresponding folio functions, and the comments are updated to match.
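
For reference, the conversion follows the usual page -> folio mapping.
A minimal sketch is below (the helper is hypothetical and only illustrates
the pattern; the folio_* calls are the ones used in the diff):

  #include <linux/mm.h>

  /* Caller holds a reference on @page. */
  static void __maybe_unused folio_conversion_sketch(struct page *page)
  {
          /* page_folio() gives the folio that contains @page. */
          struct folio *folio = page_folio(page);

          /* PageTransHuge(page)  ->  folio_test_transhuge(folio) */
          /* page_count(page)     ->  folio_ref_count(folio)      */
          if (!folio_test_transhuge(folio) && folio_ref_count(folio) == 1) {
                  /* ClearPageActive()/ClearPageUnevictable() -> folio_clear_*() */
                  folio_clear_active(folio);
                  folio_clear_unevictable(folio);
          }

          /* put_page(page)  ->  folio_put(folio) */
          folio_put(folio);
  }

The few callbacks that still take a struct page (get_new_page(),
put_new_page(), set_page_owner_migrate_reason()) are passed &folio->page.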

Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
---
 mm/migrate.c | 54 ++++++++++++++++++++++++++--------------------------
 1 file changed, 27 insertions(+), 27 deletions(-)

Comments

Baolin Wang Nov. 7, 2022, 7:26 a.m. UTC | #1
On 11/4/2022 4:30 PM, Huang Ying wrote:
> Quite straightforward, the page functions are converted to
> corresponding folio functions.  Same for comments.
>

LGTM. Please feel free to add:
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>

Matthew Wilcox Nov. 7, 2022, 1:49 p.m. UTC | #2
On Fri, Nov 04, 2022 at 04:30:19PM +0800, Huang Ying wrote:
> Quite straightforward, the page functions are converted to
> corresponding folio functions.  Same for comments.

Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Zi Yan Nov. 7, 2022, 3:29 p.m. UTC | #3
On 4 Nov 2022, at 4:30, Huang Ying wrote:

> Quite straightforward, the page functions are converted to
> corresponding folio functions.  Same for comments.
>
LGTM. Thanks.

Reviewed-by: Zi Yan <ziy@nvidia.com>

--
Best Regards,
Yan, Zi
Yang Shi Nov. 7, 2022, 6:49 p.m. UTC | #4
On Fri, Nov 4, 2022 at 1:31 AM Huang Ying <ying.huang@intel.com> wrote:
>
> Quite straightforward, the page functions are converted to
> corresponding folio functions.  Same for comments.

Reviewed-by: Yang Shi <shy828301@gmail.com>

Patch

diff --git a/mm/migrate.c b/mm/migrate.c
index dff333593a8a..f6dd749dd2f8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1150,79 +1150,79 @@  static int __unmap_and_move(struct folio *src, struct folio *dst,
 }
 
 /*
- * Obtain the lock on page, remove all ptes and migrate the page
- * to the newly allocated page in newpage.
+ * Obtain the lock on folio, remove all ptes and migrate the folio
+ * to the newly allocated folio in dst.
  */
 static int unmap_and_move(new_page_t get_new_page,
 				   free_page_t put_new_page,
-				   unsigned long private, struct page *page,
+				   unsigned long private, struct folio *src,
 				   int force, enum migrate_mode mode,
 				   enum migrate_reason reason,
 				   struct list_head *ret)
 {
-	struct folio *dst, *src = page_folio(page);
+	struct folio *dst;
 	int rc = MIGRATEPAGE_SUCCESS;
 	struct page *newpage = NULL;
 
-	if (!thp_migration_supported() && PageTransHuge(page))
+	if (!thp_migration_supported() && folio_test_transhuge(src))
 		return -ENOSYS;
 
-	if (page_count(page) == 1) {
-		/* Page was freed from under us. So we are done. */
-		ClearPageActive(page);
-		ClearPageUnevictable(page);
+	if (folio_ref_count(src) == 1) {
+		/* Folio was freed from under us. So we are done. */
+		folio_clear_active(src);
+		folio_clear_unevictable(src);
 		/* free_pages_prepare() will clear PG_isolated. */
 		goto out;
 	}
 
-	newpage = get_new_page(page, private);
+	newpage = get_new_page(&src->page, private);
 	if (!newpage)
 		return -ENOMEM;
 	dst = page_folio(newpage);
 
-	newpage->private = 0;
+	dst->private = 0;
 	rc = __unmap_and_move(src, dst, force, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
-		set_page_owner_migrate_reason(newpage, reason);
+		set_page_owner_migrate_reason(&dst->page, reason);
 
 out:
 	if (rc != -EAGAIN) {
 		/*
-		 * A page that has been migrated has all references
-		 * removed and will be freed. A page that has not been
+		 * A folio that has been migrated has all references
+		 * removed and will be freed. A folio that has not been
 		 * migrated will have kept its references and be restored.
 		 */
-		list_del(&page->lru);
+		list_del(&src->lru);
 	}
 
 	/*
 	 * If migration is successful, releases reference grabbed during
-	 * isolation. Otherwise, restore the page to right list unless
+	 * isolation. Otherwise, restore the folio to right list unless
 	 * we want to retry.
 	 */
 	if (rc == MIGRATEPAGE_SUCCESS) {
 		/*
-		 * Compaction can migrate also non-LRU pages which are
+		 * Compaction can migrate also non-LRU folios which are
 		 * not accounted to NR_ISOLATED_*. They can be recognized
-		 * as __PageMovable
+		 * as __folio_test_movable
 		 */
-		if (likely(!__PageMovable(page)))
-			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
-					page_is_file_lru(page), -thp_nr_pages(page));
+		if (likely(!__folio_test_movable(src)))
+			mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
+					folio_is_file_lru(src), -folio_nr_pages(src));
 
 		if (reason != MR_MEMORY_FAILURE)
 			/*
-			 * We release the page in page_handle_poison.
+			 * We release the folio in page_handle_poison.
 			 */
-			put_page(page);
+			folio_put(src);
 	} else {
 		if (rc != -EAGAIN)
-			list_add_tail(&page->lru, ret);
+			list_add_tail(&src->lru, ret);
 
 		if (put_new_page)
-			put_new_page(newpage, private);
+			put_new_page(&dst->page, private);
 		else
-			put_page(newpage);
+			folio_put(dst);
 	}
 
 	return rc;
@@ -1459,7 +1459,7 @@  int migrate_pages(struct list_head *from, new_page_t get_new_page,
 						&ret_pages);
 			else
 				rc = unmap_and_move(get_new_page, put_new_page,
-						private, page, pass > 2, mode,
+						private, page_folio(page), pass > 2, mode,
 						reason, &ret_pages);
 			/*
 			 * The rules are: