
[v3,15/15] slab: Allocate frozen pages

Message ID: 20241125210149.2976098-16-willy@infradead.org (mailing list archive)
State: New
Series: Allocate and free frozen pages

Commit Message

Matthew Wilcox Nov. 25, 2024, 9:01 p.m. UTC
Since slab does not use the page refcount, it can allocate and
free frozen pages, saving one atomic operation per free.

Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
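
The atomic being saved is the put_page_testzero() refcount decrement that
__free_pages() performs before handing pages back to the allocator.  A
simplified sketch of the difference (not the exact mm/page_alloc.c code):

	/* Refcounted free: one atomic dec-and-test of page->_refcount per call. */
	void __free_pages(struct page *page, unsigned int order)
	{
		if (put_page_testzero(page))
			free_frozen_pages(page, order);
		/* ... handling for still-referenced pages elided ... */
	}

Slab keeps _refcount at zero for the slab's entire lifetime, so it can call
free_frozen_pages() directly and skip the dec-and-test altogether.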

Comments

David Hildenbrand Nov. 27, 2024, 3:07 p.m. UTC | #1
On 25.11.24 22:01, Matthew Wilcox (Oracle) wrote:
> Since slab does not use the page refcount, it can allocate and
> free frozen pages, saving one atomic operation per free.
> 
> Reviewed-by: William Kucharski <william.kucharski@oracle.com>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> ---
>   mm/slub.c | 6 +++---
>   1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/mm/slub.c b/mm/slub.c
> index 19980419b176..3d0c1cd6bdf5 100644
> --- a/mm/slub.c
> +++ b/mm/slub.c
> @@ -2405,9 +2405,9 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
>   	unsigned int order = oo_order(oo);
>   
>   	if (node == NUMA_NO_NODE)
> -		folio = (struct folio *)alloc_pages(flags, order);
> +		folio = (struct folio *)alloc_frozen_pages(flags, order);
>   	else
> -		folio = (struct folio *)__alloc_pages_node(node, flags, order);
> +		folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
>   
>   	if (!folio)
>   		return NULL;
> @@ -2641,7 +2641,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
>   	__folio_clear_slab(folio);
>   	mm_account_reclaimed_pages(pages);
>   	unaccount_slab(slab, order, s);
> -	__free_pages(&folio->page, order);
> +	free_frozen_pages(&folio->page, order);
>   }
>   
>   static void rcu_free_slab(struct rcu_head *h)


I think we discussed in v1 or v2 that page isolation should be taught about that: with a frozen (zero) refcount, an in-use slab page would now be skipped by the !page_ref_count() check.

Likely we want something like:

diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7e04047977cfe..7db2f79b39f0d 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -101,6 +101,8 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
                  * because their page->_refcount is zero at all time.
                  */
                 if (!page_ref_count(page)) {
+                       if (PageSlab(page))
+                               return page;
                         if (PageBuddy(page))
                                 pfn += (1 << buddy_order(page)) - 1;
                         continue;
Matthew Wilcox Nov. 27, 2024, 3:52 p.m. UTC | #2
On Wed, Nov 27, 2024 at 04:07:01PM +0100, David Hildenbrand wrote:
> I think we discussed in v1 or v2 that page isolation should be taught about that.
> 
> Likely we want something like:
> 
> diff --git a/mm/page_isolation.c b/mm/page_isolation.c
> index 7e04047977cfe..7db2f79b39f0d 100644
> --- a/mm/page_isolation.c
> +++ b/mm/page_isolation.c
> @@ -101,6 +101,8 @@ static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long e
>                  * because their page->_refcount is zero at all time.
>                  */
>                 if (!page_ref_count(page)) {
> +                       if (PageSlab(page))
> +                               return page;
>                         if (PageBuddy(page))
>                                 pfn += (1 << buddy_order(page)) - 1;

Ah, for order-0 slabs!  I got caught up thinking that slabs would be
caught by the earlier PageTransCompound() check.  But that's also a
bit messy, since we know that __folio_test_movable() can sometimes appear
true on slab pages.  So I'm tempted to hoist this check way up, to between
the ZONE_MOVABLE check and the PageHuge() one.
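
Concretely, that would put the test roughly here (rough, untested sketch;
the surrounding context in has_unmovable_pages() is approximate):

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		page = pfn_to_page(pfn);

		if (PageReserved(page))
			return page;

		/* Everything left in ZONE_MOVABLE is assumed movable. */
		if (zone_idx(zone) == ZONE_MOVABLE)
			continue;

		/*
		 * Slab pages are unmovable, and with frozen allocations
		 * their refcount is zero, so catch them before both the
		 * PageHuge()/PageTransCompound() and the !page_ref_count()
		 * handling below.
		 */
		if (PageSlab(page))
			return page;

		if (PageHuge(page) || PageTransCompound(page)) {
			/* existing hugepage/THP handling ... */
		}
		...
	}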

Patch

diff --git a/mm/slub.c b/mm/slub.c
index 19980419b176..3d0c1cd6bdf5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2405,9 +2405,9 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 	unsigned int order = oo_order(oo);
 
 	if (node == NUMA_NO_NODE)
-		folio = (struct folio *)alloc_pages(flags, order);
+		folio = (struct folio *)alloc_frozen_pages(flags, order);
 	else
-		folio = (struct folio *)__alloc_pages_node(node, flags, order);
+		folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
 
 	if (!folio)
 		return NULL;
@@ -2641,7 +2641,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
 	__folio_clear_slab(folio);
 	mm_account_reclaimed_pages(pages);
 	unaccount_slab(slab, order, s);
-	__free_pages(&folio->page, order);
+	free_frozen_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)