| Message ID | 20240227180532.721365-1-willy@infradead.org (mailing list archive) |
|---|---|
| State | New |
| Series | mm: Use folio more widely in __split_huge_page |
On 2/27/24 10:05 AM, Matthew Wilcox (Oracle) wrote:
> We already have a folio; use it instead of the head page where
> reasonable.  Saves a couple of calls to compound_head() and eliminates
> a few references to page->mapping.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>  mm/huge_memory.c | 23 ++++++++++++-----------
>  1 file changed, 12 insertions(+), 11 deletions(-)
> [...]

Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 28341a5067fb..aeb6671f7c44 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2884,7 +2884,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
         struct lruvec *lruvec;
         struct address_space *swap_cache = NULL;
         unsigned long offset = 0;
-        unsigned int nr = thp_nr_pages(head);
+        unsigned int nr = folio_nr_pages(folio);
         int i, nr_dropped = 0;
 
         /* complete memcg works before add pages to LRU */
@@ -2907,7 +2907,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                 if (head[i].index >= end) {
                         struct folio *tail = page_folio(head + i);
 
-                        if (shmem_mapping(head->mapping))
+                        if (shmem_mapping(folio->mapping))
                                 nr_dropped++;
                         else if (folio_test_clear_dirty(tail))
                                 folio_account_cleaned(tail,
@@ -2915,7 +2915,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
                         __filemap_remove_folio(tail, NULL);
                         folio_put(tail);
                 } else if (!PageAnon(page)) {
-                        __xa_store(&head->mapping->i_pages, head[i].index,
+                        __xa_store(&folio->mapping->i_pages, head[i].index,
                                         head + i, 0);
                 } else if (swap_cache) {
                         __xa_store(&swap_cache->i_pages, offset + i,
@@ -2930,23 +2930,23 @@ static void __split_huge_page(struct page *page, struct list_head *list,
         split_page_owner(head, nr);
 
         /* See comment in __split_huge_page_tail() */
-        if (PageAnon(head)) {
+        if (folio_test_anon(folio)) {
                 /* Additional pin to swap cache */
-                if (PageSwapCache(head)) {
-                        page_ref_add(head, 2);
+                if (folio_test_swapcache(folio)) {
+                        folio_ref_add(folio, 2);
                         xa_unlock(&swap_cache->i_pages);
                 } else {
-                        page_ref_inc(head);
+                        folio_ref_inc(folio);
                 }
         } else {
                 /* Additional pin to page cache */
-                page_ref_add(head, 2);
-                xa_unlock(&head->mapping->i_pages);
+                folio_ref_add(folio, 2);
+                xa_unlock(&folio->mapping->i_pages);
         }
         local_irq_enable();
 
         if (nr_dropped)
-                shmem_uncharge(head->mapping->host, nr_dropped);
+                shmem_uncharge(folio->mapping->host, nr_dropped);
         remap_page(folio, nr);
 
         if (folio_test_swapcache(folio))
@@ -2954,9 +2954,10 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 
         for (i = 0; i < nr; i++) {
                 struct page *subpage = head + i;
+                struct folio *new_folio = page_folio(subpage);
                 if (subpage == page)
                         continue;
-                unlock_page(subpage);
+                folio_unlock(new_folio);
 
                 /*
                  * Subpages may be freed if there wasn't any mapping
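A note on the last hunk: once the split is complete, every subpage is an order-0 page, i.e. its own head page, so `page_folio(subpage)` just yields that page viewed as a folio, and `folio_unlock(new_folio)` does exactly what `unlock_page(subpage)` did, minus the head lookup inside the helper. A toy userspace sketch of that relationship (the struct layouts and helpers are simplified stand-ins for illustration, not the kernel's real definitions):

```c
/* Toy model of the unlock_page() -> folio_unlock() change; simplified
 * stand-ins, not kernel code. "head" is an explicit pointer here,
 * whereas the kernel encodes it in the page, and the lock is a flag. */
#include <assert.h>
#include <stdbool.h>

struct page { struct page *head; bool locked; };
struct folio { struct page page; };     /* a folio is a head page */

static struct folio *page_folio(struct page *page)
{
        return (struct folio *)page->head;      /* the head lookup */
}

static void folio_unlock(struct folio *folio)
{
        folio->page.locked = false;
}

/* The legacy helper is just the folio helper plus the head lookup. */
static void unlock_page(struct page *page)
{
        folio_unlock(page_folio(page));
}

int main(void)
{
        /* Post-split, a subpage is order-0: it is its own head. */
        struct page subpage = { .head = &subpage, .locked = true };

        unlock_page(&subpage);          /* old style: lookup inside */
        assert(!subpage.locked);

        subpage.locked = true;
        struct folio *new_folio = page_folio(&subpage); /* lookup once */
        folio_unlock(new_folio);        /* new style: no hidden lookup */
        assert(!subpage.locked);
        return 0;
}
```

The kernel's real `page_folio()` works from the encoded compound-head word rather than a plain pointer, but the shape of the saving is the same.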
We already have a folio; use it instead of the head page where reasonable.
Saves a couple of calls to compound_head() and eliminates a few references
to page->mapping.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/huge_memory.c | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)
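The "couple of calls to compound_head()" the commit message mentions are hidden inside the legacy helpers: every `PageAnon()`/`page_ref_add()`-style call first resolves the head page, while the `folio_test_*()`/`folio_ref_*()` variants act on a folio the caller has already resolved (here, once, at the top of `__split_huge_page()`). Below is a minimal compilable model of that pattern; the names mirror the kernel's, but the types and bodies are simplified stand-ins, not the real implementations:

```c
/* Minimal model of why folio calls avoid compound_head() lookups.
 * Simplified stand-ins: the real kernel encodes the head pointer in
 * the page itself and resolves it inside every legacy helper. */
#include <assert.h>
#include <stdbool.h>

struct page {
        struct page *head;      /* stand-in for the compound_head encoding */
        bool anon;              /* stand-in for the anon flag */
        int refcount;
};
struct folio { struct page page; };     /* a folio is a head page */

static struct page *compound_head(struct page *page)
{
        return page->head;      /* the lookup every legacy call pays for */
}

static struct folio *page_folio(struct page *page)
{
        return (struct folio *)compound_head(page);
}

/* Legacy style: resolve the head on every call. */
static bool PageAnon(struct page *page)
{
        return compound_head(page)->anon;
}

static void page_ref_add(struct page *page, int n)
{
        compound_head(page)->refcount += n;
}

/* Folio style: the caller already holds the head; read it directly. */
static bool folio_test_anon(struct folio *folio)
{
        return folio->page.anon;
}

static void folio_ref_add(struct folio *folio, int n)
{
        folio->page.refcount += n;
}

int main(void)
{
        struct page pages[4];
        for (int i = 0; i < 4; i++)
                pages[i] = (struct page){ .head = &pages[0], .anon = true };

        struct folio *folio = page_folio(&pages[2]);    /* one lookup */

        /* Same effect either way; the folio calls skip the per-call
         * compound_head() resolution. */
        assert(PageAnon(&pages[2]) == folio_test_anon(folio));
        page_ref_add(&pages[2], 2);     /* lookup inside */
        folio_ref_add(folio, 2);        /* no lookup */
        assert(pages[0].refcount == 4);
        return 0;
}
```

Since the folio and the head page are the same object, each converted call site drops one redundant lookup with no change in behaviour.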