Message ID | 20240217022546.1496101-19-willy@infradead.org |
---|---|
State | New |
Series | Rearrange batched folio freeing |
On 17.02.24 03:25, Matthew Wilcox (Oracle) wrote:
> All but one caller already has a folio, so convert
> free_page_and_swap_cache() to have a folio and remove the call to
> page_folio().
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>  include/linux/swap.h | 8 ++++----
>  mm/khugepaged.c      | 2 +-
>  mm/memory.c          | 2 +-
>  mm/swap_state.c      | 12 ++++++------
>  4 files changed, 12 insertions(+), 12 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index 3e2b038852bb..a211a0383425 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -440,9 +440,9 @@ static inline unsigned long total_swapcache_pages(void)
>  	return global_node_page_state(NR_SWAPCACHE);
>  }
>
> -extern void free_swap_cache(struct page *page);
> -extern void free_page_and_swap_cache(struct page *);
> -extern void free_pages_and_swap_cache(struct encoded_page **, int);
> +void free_swap_cache(struct folio *folio);
> +void free_page_and_swap_cache(struct page *);
> +void free_pages_and_swap_cache(struct encoded_page **, int);
>  /* linux/mm/swapfile.c */
>  extern atomic_long_t nr_swap_pages;
>  extern long total_swap_pages;
> @@ -524,7 +524,7 @@ static inline void put_swap_device(struct swap_info_struct *si)
>  /* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
>  #define free_swap_and_cache(e) is_pfn_swap_entry(e)
>
> -static inline void free_swap_cache(struct page *page)
> +static inline void free_swap_cache(struct folio *folio)
>  {
>  }
>
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index 5cc39c3f3847..d19fba3355a7 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -731,7 +731,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
>  		node_stat_sub_folio(src, NR_ISOLATED_ANON +
>  				folio_is_file_lru(src));
>  		folio_unlock(src);
> -		free_swap_cache(&src->page);
> +		free_swap_cache(src);
>  		folio_putback_lru(src);
>  	}
>  }
> diff --git a/mm/memory.c b/mm/memory.c
> index e3e32c5b4be1..815312b2dc48 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3376,7 +3376,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
>  		folio_put(new_folio);
>  	if (old_folio) {
>  		if (page_copied)
> -			free_swap_cache(&old_folio->page);
> +			free_swap_cache(old_folio);
>  		folio_put(old_folio);
>  	}
>
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index f2e07022d763..3f58d6fd5b44 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -283,10 +283,8 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
>   * folio_free_swap() _with_ the lock.
>   * 					- Marcelo
>   */
> -void free_swap_cache(struct page *page)
> +void free_swap_cache(struct folio *folio)
>  {

I wanted to do the same, great to see that you already have a patch for it.

I was wondering whether we should call that something like
"folio_try_free_swap_cache" instead.

Anyhow

Reviewed-by: David Hildenbrand <david@redhat.com>
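For context, a minimal sketch of the rename floated in the reply above, assuming the function body stays exactly as in the patch; the name folio_try_free_swap_cache() is only the suggestion made here, not something the series actually introduces:

```c
/*
 * Hypothetical rename only -- the patch keeps the free_swap_cache() name.
 * The body mirrors the folio-based free_swap_cache() in mm/swap_state.c
 * shown in the diff below. A "try" prefix would make explicit that this
 * is best-effort: it does nothing unless the folio is in the swap cache,
 * is no longer mapped, and the trylock succeeds.
 */
void folio_try_free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}
```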
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 3e2b038852bb..a211a0383425 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -440,9 +440,9 @@ static inline unsigned long total_swapcache_pages(void)
 	return global_node_page_state(NR_SWAPCACHE);
 }
 
-extern void free_swap_cache(struct page *page);
-extern void free_page_and_swap_cache(struct page *);
-extern void free_pages_and_swap_cache(struct encoded_page **, int);
+void free_swap_cache(struct folio *folio);
+void free_page_and_swap_cache(struct page *);
+void free_pages_and_swap_cache(struct encoded_page **, int);
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
 extern long total_swap_pages;
@@ -524,7 +524,7 @@ static inline void put_swap_device(struct swap_info_struct *si)
 /* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
 #define free_swap_and_cache(e) is_pfn_swap_entry(e)
 
-static inline void free_swap_cache(struct page *page)
+static inline void free_swap_cache(struct folio *folio)
 {
 }
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5cc39c3f3847..d19fba3355a7 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -731,7 +731,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte,
 		node_stat_sub_folio(src, NR_ISOLATED_ANON +
 				folio_is_file_lru(src));
 		folio_unlock(src);
-		free_swap_cache(&src->page);
+		free_swap_cache(src);
 		folio_putback_lru(src);
 	}
 }
diff --git a/mm/memory.c b/mm/memory.c
index e3e32c5b4be1..815312b2dc48 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3376,7 +3376,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
 		folio_put(new_folio);
 	if (old_folio) {
 		if (page_copied)
-			free_swap_cache(&old_folio->page);
+			free_swap_cache(old_folio);
 		folio_put(old_folio);
 	}
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index f2e07022d763..3f58d6fd5b44 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -283,10 +283,8 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin,
  * folio_free_swap() _with_ the lock.
  * 					- Marcelo
  */
-void free_swap_cache(struct page *page)
+void free_swap_cache(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
 	    folio_trylock(folio)) {
 		folio_free_swap(folio);
@@ -300,9 +298,11 @@ void free_swap_cache(struct page *page)
  */
 void free_page_and_swap_cache(struct page *page)
 {
-	free_swap_cache(page);
+	struct folio *folio = page_folio(page);
+
+	free_swap_cache(folio);
 	if (!is_huge_zero_page(page))
-		put_page(page);
+		folio_put(folio);
 }
 
 /*
@@ -317,7 +317,7 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
 	folio_batch_init(&folios);
 	for (int i = 0; i < nr; i++) {
 		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));
-		free_swap_cache(&folio->page);
+		free_swap_cache(folio);
 		if (folio_batch_add(&folios, folio) == 0)
 			folios_put(&folios);
 	}
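To illustrate the caller-side pattern the commit message describes (a sketch, not part of the patch): a caller that still only holds a struct page does the page-to-folio lookup once at the boundary and then stays in folio terms, which is what the updated free_page_and_swap_cache() above does internally. The helper name below is made up for the example:

```c
#include <linux/mm.h>	/* page_folio(), folio_put() */
#include <linux/swap.h>	/* free_swap_cache() after this patch */

/* Hypothetical caller, for illustration only. */
static void example_drop_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);	/* single page->folio lookup */

	free_swap_cache(folio);	/* folio-based interface after this patch */
	folio_put(folio);	/* drop the reference via the folio */
}
```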