diff mbox series

[v2,02/14] mm/page_alloc: split prep_compound_page into head and tail subparts

Message ID 20210617184507.3662-3-joao.m.martins@oracle.com (mailing list archive)
State Superseded
Headers show
Series mm, sparse-vmemmap: Introduce compound pagemaps | expand

Commit Message

Joao Martins June 17, 2021, 6:44 p.m. UTC
Split the utility function prep_compound_page() into head and tail
counterparts, and use them accordingly.

This is in preparation for sharing the storage for / deduplicating
compound page metadata.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 mm/page_alloc.c | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

Comments

Mike Kravetz July 13, 2021, 12:02 a.m. UTC | #1
On 6/17/21 11:44 AM, Joao Martins wrote:
> Split the utility function prep_compound_page() into head and tail
> counterparts, and use them accordingly.
> 
> This is in preparation for sharing the storage for / deduplicating
> compound page metadata.
> 
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
>  mm/page_alloc.c | 32 +++++++++++++++++++++-----------
>  1 file changed, 21 insertions(+), 11 deletions(-)
> 
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 8836e54721ae..95967ce55829 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -741,24 +741,34 @@ void free_compound_page(struct page *page)
>  	free_the_page(page, compound_order(page));
>  }
>  
> +static void prep_compound_head(struct page *page, unsigned int order)
> +{
> +	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
> +	set_compound_order(page, order);
> +	atomic_set(compound_mapcount_ptr(page), -1);
> +	if (hpage_pincount_available(page))
> +		atomic_set(compound_pincount_ptr(page), 0);
> +}
> +
> +static void prep_compound_tail(struct page *head, int tail_idx)
> +{
> +	struct page *p = head + tail_idx;
> +
> +	set_page_count(p, 0);

When you rebase, you should notice this has been removed from
prep_compound_page as all tail pages should have zero ref count.

> +	p->mapping = TAIL_MAPPING;
> +	set_compound_head(p, head);
> +}
> +
>  void prep_compound_page(struct page *page, unsigned int order)
>  {
>  	int i;
>  	int nr_pages = 1 << order;
>  
>  	__SetPageHead(page);
> -	for (i = 1; i < nr_pages; i++) {
> -		struct page *p = page + i;
> -		set_page_count(p, 0);
> -		p->mapping = TAIL_MAPPING;
> -		set_compound_head(p, page);
> -	}
> +	for (i = 1; i < nr_pages; i++)
> +		prep_compound_tail(page, i);
>  
> -	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
> -	set_compound_order(page, order);
> -	atomic_set(compound_mapcount_ptr(page), -1);
> -	if (hpage_pincount_available(page))
> -		atomic_set(compound_pincount_ptr(page), 0);
> +	prep_compound_head(page, order);
>  }
>  
>  #ifdef CONFIG_DEBUG_PAGEALLOC
> 

I'll need something like this for demote hugetlb page functionality
when the pages being demoted have been optimized for minimal vmemmap
usage.

Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Joao Martins July 13, 2021, 1:11 a.m. UTC | #2
On 7/13/21 1:02 AM, Mike Kravetz wrote:
> On 6/17/21 11:44 AM, Joao Martins wrote:
>> Split the utility function prep_compound_page() into head and tail
>> counterparts, and use them accordingly.
>>
>> This is in preparation for sharing the storage for / deduplicating
>> compound page metadata.
>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>>  mm/page_alloc.c | 32 +++++++++++++++++++++-----------
>>  1 file changed, 21 insertions(+), 11 deletions(-)
>>
>> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
>> index 8836e54721ae..95967ce55829 100644
>> --- a/mm/page_alloc.c
>> +++ b/mm/page_alloc.c
>> @@ -741,24 +741,34 @@ void free_compound_page(struct page *page)
>>  	free_the_page(page, compound_order(page));
>>  }
>>  
>> +static void prep_compound_head(struct page *page, unsigned int order)
>> +{
>> +	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
>> +	set_compound_order(page, order);
>> +	atomic_set(compound_mapcount_ptr(page), -1);
>> +	if (hpage_pincount_available(page))
>> +		atomic_set(compound_pincount_ptr(page), 0);
>> +}
>> +
>> +static void prep_compound_tail(struct page *head, int tail_idx)
>> +{
>> +	struct page *p = head + tail_idx;
>> +
>> +	set_page_count(p, 0);
> 
> When you rebase, you should notice this has been removed from
> prep_compound_page as all tail pages should have zero ref count.
> 
/me nods

>> +	p->mapping = TAIL_MAPPING;
>> +	set_compound_head(p, head);
>> +}
>> +
>>  void prep_compound_page(struct page *page, unsigned int order)
>>  {
>>  	int i;
>>  	int nr_pages = 1 << order;
>>  
>>  	__SetPageHead(page);
>> -	for (i = 1; i < nr_pages; i++) {
>> -		struct page *p = page + i;
>> -		set_page_count(p, 0);
>> -		p->mapping = TAIL_MAPPING;
>> -		set_compound_head(p, page);
>> -	}
>> +	for (i = 1; i < nr_pages; i++)
>> +		prep_compound_tail(page, i);
>>  
>> -	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
>> -	set_compound_order(page, order);
>> -	atomic_set(compound_mapcount_ptr(page), -1);
>> -	if (hpage_pincount_available(page))
>> -		atomic_set(compound_pincount_ptr(page), 0);
>> +	prep_compound_head(page, order);
>>  }
>>  
>>  #ifdef CONFIG_DEBUG_PAGEALLOC
>>
> 
> I'll need something like this for demote hugetlb page functionality
> when the pages being demoted have been optimized for minimal vmemmap
> usage.
> 
> Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
> 
Thanks!
diff mbox series

Patch

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8836e54721ae..95967ce55829 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -741,24 +741,34 @@  void free_compound_page(struct page *page)
 	free_the_page(page, compound_order(page));
 }
 
+static void prep_compound_head(struct page *page, unsigned int order)
+{
+	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+	set_compound_order(page, order);
+	atomic_set(compound_mapcount_ptr(page), -1);
+	if (hpage_pincount_available(page))
+		atomic_set(compound_pincount_ptr(page), 0);
+}
+
+static void prep_compound_tail(struct page *head, int tail_idx)
+{
+	struct page *p = head + tail_idx;
+
+	set_page_count(p, 0);
+	p->mapping = TAIL_MAPPING;
+	set_compound_head(p, head);
+}
+
 void prep_compound_page(struct page *page, unsigned int order)
 {
 	int i;
 	int nr_pages = 1 << order;
 
 	__SetPageHead(page);
-	for (i = 1; i < nr_pages; i++) {
-		struct page *p = page + i;
-		set_page_count(p, 0);
-		p->mapping = TAIL_MAPPING;
-		set_compound_head(p, page);
-	}
+	for (i = 1; i < nr_pages; i++)
+		prep_compound_tail(page, i);
 
-	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
-	set_compound_order(page, order);
-	atomic_set(compound_mapcount_ptr(page), -1);
-	if (hpage_pincount_available(page))
-		atomic_set(compound_pincount_ptr(page), 0);
+	prep_compound_head(page, order);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC