
[v2,06/11] mm: Convert hugetlb_page_mapping_lock_write to folio

Message ID 20240408194232.118537-7-willy@infradead.org
State New
Series Some cleanups for memory-failure

Commit Message

Matthew Wilcox April 8, 2024, 7:42 p.m. UTC
The page is only used to get the mapping, so the folio will do just
as well.  Both callers already have a folio available, so this saves
a call to compound_head().

Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/hugetlb.h | 6 +++---
 mm/hugetlb.c            | 6 +++---
 mm/memory-failure.c     | 2 +-
 mm/migrate.c            | 2 +-
 4 files changed, 8 insertions(+), 8 deletions(-)
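
For reference, the compound_head() saving comes from the page-based entry
point having to resolve the head page on every call. A minimal sketch of
the compat wrapper of this era (modelled on mm/folio-compat.c; shown for
illustration only, not part of this patch):

	/* Sketch: the page-based API pays for compound_head() each call. */
	struct address_space *page_mapping(struct page *page)
	{
		/* page_folio() resolves the head page via compound_head() */
		return folio_mapping(page_folio(page));
	}

Since both callers already hold a folio, calling folio_mapping() directly
skips that resolution.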

Comments

Jane Chu April 8, 2024, 11:09 p.m. UTC | #1
On 4/8/2024 12:42 PM, Matthew Wilcox (Oracle) wrote:

> The page is only used to get the mapping, so the folio will do just
> as well.  Both callers already have a folio available, so this saves
> a call to compound_head().
>
> Acked-by: Miaohe Lin <linmiaohe@huawei.com>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> [...]

Looks good.

Reviewed-by: Jane Chu <jane.chu@oracle.com>

-jane
Oscar Salvador April 10, 2024, 9:52 a.m. UTC | #2
On Mon, Apr 08, 2024 at 08:42:24PM +0100, Matthew Wilcox (Oracle) wrote:
> The page is only used to get the mapping, so the folio will do just
> as well.  Both callers already have a folio available, so this saves
> a call to compound_head().
> 
> Acked-by: Miaohe Lin <linmiaohe@huawei.com>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Reviewed-by: Oscar Salvador <osalvador@suse.de>

Patch

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 3f3e62880279..bebf4c3a53ef 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -178,7 +178,7 @@ bool hugetlbfs_pagecache_present(struct hstate *h,
 				 struct vm_area_struct *vma,
 				 unsigned long address);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -297,8 +297,8 @@ static inline unsigned long hugetlb_total_pages(void)
 	return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-							struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+							struct folio *folio)
 {
 	return NULL;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 456c81fbf8f5..707c85303e88 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2155,13 +2155,13 @@ static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable.  Due to locking order, we can only trylock_write.  If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(hpage);
+	struct address_space *mapping = folio_mapping(folio);
 
 	if (!mapping)
 		return mapping;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2e64e132bba1..0a45fb7fb055 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1608,7 +1608,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
 		 * at this higher level.
 		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
+		mapping = hugetlb_folio_mapping_lock_write(folio);
 		if (mapping) {
 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
 			i_mmap_unlock_write(mapping);
diff --git a/mm/migrate.c b/mm/migrate.c
index 285072bca29c..f8da9b89e043 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1425,7 +1425,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
 			 * to let lower levels know we have taken the lock.
 			 */
-			mapping = hugetlb_page_mapping_lock_write(&src->page);
+			mapping = hugetlb_folio_mapping_lock_write(src);
 			if (unlikely(!mapping))
 				goto unlock_put_anon;
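
For context, the mm/hugetlb.c hunk above shows only the top of the
converted function; the trylock tail that its comment refers to is
untouched by this patch. A sketch of the whole function, reconstructed
from kernel sources of this era:

	/* Sketch of the full function; only the top appears in this diff. */
	struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
	{
		struct address_space *mapping = folio_mapping(folio);

		if (!mapping)
			return mapping;

		/*
		 * The usual lock order takes i_mmap_rwsem before the folio
		 * lock, and the folio is already locked here, so only a
		 * trylock is safe.
		 */
		if (i_mmap_trylock_write(mapping))
			return mapping;

		return NULL;
	}

On success the caller owns i_mmap_rwsem in write mode, passes
TTU_RMAP_LOCKED to try_to_unmap(), and drops the lock with
i_mmap_unlock_write(), as the memory-failure hunk shows; on NULL, the
migration caller bails out via unlock_put_anon.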