Message ID: 20240229212036.2160900-6-willy@infradead.org (mailing list archive)
State: New
Series: Some cleanups for memory-failure
On 2024/3/1 5:20, Matthew Wilcox (Oracle) wrote:
> The page is only used to get the mapping, so the folio will do just
> as well. Both callers already have a folio available, so this saves
> a call to compound_head().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

Looks good to me. Thanks.

Acked-by: Miaohe Lin <linmiaohe@huawei.com>

> ---
>  include/linux/hugetlb.h | 6 +++---
>  mm/hugetlb.c            | 6 +++---
>  mm/memory-failure.c     | 2 +-
>  mm/migrate.c            | 2 +-
>  4 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 77b30a8c6076..acb1096ecdaa 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -175,7 +175,7 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
>  pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
>  		unsigned long addr, pud_t *pud);
>
> -struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
> +struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
>
>  extern int sysctl_hugetlb_shm_group;
>  extern struct list_head huge_boot_pages[MAX_NUMNODES];
> @@ -298,8 +298,8 @@ static inline unsigned long hugetlb_total_pages(void)
>  	return 0;
>  }
>
> -static inline struct address_space *hugetlb_page_mapping_lock_write(
> -					struct page *hpage)
> +static inline struct address_space *hugetlb_folio_mapping_lock_write(
> +					struct folio *folio)
>  {
>  	return NULL;
>  }
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index bb17e5c22759..0e464a8f1aa9 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -2178,13 +2178,13 @@ EXPORT_SYMBOL_GPL(PageHuge);
>  /*
>   * Find and lock address space (mapping) in write mode.
>   *
> - * Upon entry, the page is locked which means that page_mapping() is
> + * Upon entry, the folio is locked which means that folio_mapping() is
>   * stable. Due to locking order, we can only trylock_write. If we can
>   * not get the lock, simply return NULL to caller.
>   */
> -struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
> +struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
>  {
> -	struct address_space *mapping = page_mapping(hpage);
> +	struct address_space *mapping = folio_mapping(folio);
>
>  	if (!mapping)
>  		return mapping;
> diff --git a/mm/memory-failure.c b/mm/memory-failure.c
> index 27dc21063552..fe4959e994d0 100644
> --- a/mm/memory-failure.c
> +++ b/mm/memory-failure.c
> @@ -1624,7 +1624,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
>  		 * TTU_RMAP_LOCKED to indicate we have taken the lock
>  		 * at this higher level.
>  		 */
> -		mapping = hugetlb_page_mapping_lock_write(hpage);
> +		mapping = hugetlb_folio_mapping_lock_write(folio);
>  		if (mapping) {
>  			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
>  			i_mmap_unlock_write(mapping);
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 73a052a382f1..0aef867d600b 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -1425,7 +1425,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
>  		 * semaphore in write mode here and set TTU_RMAP_LOCKED
>  		 * to let lower levels know we have taken the lock.
>  		 */
> -		mapping = hugetlb_page_mapping_lock_write(&src->page);
> +		mapping = hugetlb_folio_mapping_lock_write(src);
>  		if (unlikely(!mapping))
>  			goto unlock_put_anon;
>
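The compound_head() saving described in the commit message comes from how the two helpers resolve the mapping. Below is a minimal sketch of that relationship, not the literal kernel source; sketch_page_mapping() is a hypothetical name, and the body loosely mirrors the page_mapping() compatibility wrapper from the folio conversion work:

#include <linux/mm.h>       /* page_folio() */
#include <linux/pagemap.h>  /* folio_mapping() */

/* Hypothetical illustration only; not a real kernel symbol. */
static inline struct address_space *sketch_page_mapping(struct page *page)
{
	/*
	 * Starting from a page, the head page of a compound page must be
	 * resolved first (page_folio() boils down to a compound_head()
	 * lookup) before the mapping can be read ...
	 */
	struct folio *folio = page_folio(page);

	/* ... whereas a caller that already holds a folio starts here. */
	return folio_mapping(folio);
}

Since both callers, hwpoison_user_mappings() and unmap_and_move_huge_page(), already hold a folio, passing it straight to hugetlb_folio_mapping_lock_write() skips that head-page lookup.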