Message ID | 20231211162214.2146080-10-willy@infradead.org (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | Finish two folio conversions | expand |
On 11.12.23 17:22, Matthew Wilcox (Oracle) wrote: > All callers have now been converted to folio_add_new_anon_rmap() and > folio_add_lru_vma() so we can remove the wrapper. > > Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> > --- > include/linux/rmap.h | 2 -- > include/linux/swap.h | 3 --- > mm/folio-compat.c | 16 ---------------- > 3 files changed, 21 deletions(-) > > diff --git a/include/linux/rmap.h b/include/linux/rmap.h > index af6a32b6f3e7..0ae2bb0e77f5 100644 > --- a/include/linux/rmap.h > +++ b/include/linux/rmap.h > @@ -197,8 +197,6 @@ typedef int __bitwise rmap_t; > void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); > void page_add_anon_rmap(struct page *, struct vm_area_struct *, > unsigned long address, rmap_t flags); > -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, > - unsigned long address); > void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, > unsigned long address); > void page_add_file_rmap(struct page *, struct vm_area_struct *, > diff --git a/include/linux/swap.h b/include/linux/swap.h > index 4f25b1237364..edc0f2c8ce01 100644 > --- a/include/linux/swap.h > +++ b/include/linux/swap.h > @@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio); > void folio_mark_lazyfree(struct folio *folio); > extern void swap_setup(void); > > -extern void lru_cache_add_inactive_or_unevictable(struct page *page, > - struct vm_area_struct *vma); > - > /* linux/mm/vmscan.c */ > extern unsigned long zone_reclaimable_pages(struct zone *zone); > extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, > diff --git a/mm/folio-compat.c b/mm/folio-compat.c > index aee3b9a16828..50412014f16f 100644 > --- a/mm/folio-compat.c > +++ b/mm/folio-compat.c > @@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc, > } > EXPORT_SYMBOL(redirty_page_for_writepage); > > -void lru_cache_add_inactive_or_unevictable(struct page *page, > - struct vm_area_struct *vma) > -{ > - folio_add_lru_vma(page_folio(page), vma); > - } > int add_to_page_cache_lru(struct page *page, struct address_space *mapping, > pgoff_t index, gfp_t gfp) > { > @@ -122,13 +116,3 @@ void putback_lru_page(struct page *page) > { > folio_putback_lru(page_folio(page)); > } > - > -#ifdef CONFIG_MMU > -void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, > - unsigned long address) > -{ > - VM_BUG_ON_PAGE(PageTail(page), page); > - > - return folio_add_new_anon_rmap((struct folio *)page, vma, address); > -} > -#endif Reviewed-by: David Hildenbrand <david@redhat.com>
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index af6a32b6f3e7..0ae2bb0e77f5 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -197,8 +197,6 @@ typedef int __bitwise rmap_t; void folio_move_anon_rmap(struct folio *, struct vm_area_struct *); void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long address, rmap_t flags); -void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, - unsigned long address); void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); void page_add_file_rmap(struct page *, struct vm_area_struct *, diff --git a/include/linux/swap.h b/include/linux/swap.h index 4f25b1237364..edc0f2c8ce01 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -397,9 +397,6 @@ void folio_deactivate(struct folio *folio); void folio_mark_lazyfree(struct folio *folio); extern void swap_setup(void); -extern void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma); - /* linux/mm/vmscan.c */ extern unsigned long zone_reclaimable_pages(struct zone *zone); extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, diff --git a/mm/folio-compat.c b/mm/folio-compat.c index aee3b9a16828..50412014f16f 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -77,12 +77,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc, } EXPORT_SYMBOL(redirty_page_for_writepage); -void lru_cache_add_inactive_or_unevictable(struct page *page, - struct vm_area_struct *vma) -{ - folio_add_lru_vma(page_folio(page), vma); -} - int add_to_page_cache_lru(struct page *page, struct address_space *mapping, pgoff_t index, gfp_t gfp) { @@ -122,13 +116,3 @@ void putback_lru_page(struct page *page) { folio_putback_lru(page_folio(page)); } - -#ifdef CONFIG_MMU -void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, - unsigned long address) -{ - VM_BUG_ON_PAGE(PageTail(page), page); - - return folio_add_new_anon_rmap((struct folio *)page, vma, address); -} -#endif
All callers have now been converted to folio_add_new_anon_rmap() and folio_add_lru_vma() so we can remove the wrapper. Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> --- include/linux/rmap.h | 2 -- include/linux/swap.h | 3 --- mm/folio-compat.c | 16 ---------------- 3 files changed, 21 deletions(-)