[v6,2/9] mm: Non-pmd-mappable, large folios for folio_add_new_anon_rmap()

Message ID: 20230929114421.3761121-3-ryan.roberts@arm.com
State: New
Series: variable-order, large folios for anonymous memory

Commit Message

Ryan Roberts Sept. 29, 2023, 11:44 a.m. UTC
In preparation for anonymous large folio support, improve
folio_add_new_anon_rmap() to allow a non-pmd-mappable, large folio to be
passed to it. In this case, all contained pages are accounted using the
order-0 folio (or base page) scheme.

Reviewed-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
---
 mm/rmap.c | 27 ++++++++++++++++++++-------
 1 file changed, 20 insertions(+), 7 deletions(-)
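
For readers less familiar with the rmap accounting, below is a minimal
userspace C model of the three paths the patched function takes. It is an
illustration only, not kernel code: the struct layout, the COMPOUND_MAPPED
value and the helper are simplified stand-ins for the real mm internals.

#include <stdatomic.h>
#include <stdio.h>

#define COMPOUND_MAPPED (1 << 30)	/* stand-in for the kernel's marker */

/* Simplified stand-ins: one atomic per page, plus folio-level fields that
 * the kernel keeps in the tail pages of a compound allocation. */
struct page  { atomic_int _mapcount; };			/* starts at -1 */
struct folio {
	struct page pages[16];
	atomic_int _entire_mapcount;			/* starts at -1 */
	atomic_int _nr_pages_mapped;			/* starts at 0  */
	int nr;						/* folio_nr_pages() */
	int pmd_mappable;
};

/* Model of folio_add_new_anon_rmap() after this patch. */
static void add_new_anon_rmap(struct folio *f)
{
	if (f->nr == 1) {
		/* order-0: increment count (starts at -1) */
		atomic_store(&f->pages[0]._mapcount, 0);
	} else if (!f->pmd_mappable) {
		/* large but not pmd-mappable: account every contained
		 * page using the order-0 (base page) scheme */
		for (int i = 0; i < f->nr; i++)
			atomic_store(&f->pages[i]._mapcount, 0);
		atomic_store(&f->_nr_pages_mapped, f->nr);
	} else {
		/* pmd-mappable: account the whole folio as a THP */
		atomic_store(&f->_entire_mapcount, 0);
		atomic_store(&f->_nr_pages_mapped, COMPOUND_MAPPED);
	}
}

int main(void)
{
	struct folio f = { .nr = 4, .pmd_mappable = 0 };
	atomic_init(&f._entire_mapcount, -1);
	for (int i = 0; i < f.nr; i++)
		atomic_init(&f.pages[i]._mapcount, -1);
	add_new_anon_rmap(&f);
	printf("_nr_pages_mapped = %d\n", atomic_load(&f._nr_pages_mapped));
	return 0;
}

Built with any C11 compiler, the non-pmd-mappable case prints
"_nr_pages_mapped = 4", matching the per-page accounting the commit message
describes.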

Comments

Kirill A. Shutemov Sept. 29, 2023, 1:45 p.m. UTC | #1
On Fri, Sep 29, 2023 at 12:44:13PM +0100, Ryan Roberts wrote:
> In preparation for anonymous large folio support, improve
> folio_add_new_anon_rmap() to allow a non-pmd-mappable, large folio to be
> passed to it. In this case, all contained pages are accounted using the
> order-0 folio (or base page) scheme.
> 
> Reviewed-by: Yu Zhao <yuzhao@google.com>
> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
> ---
>  mm/rmap.c | 27 ++++++++++++++++++++-------
>  1 file changed, 20 insertions(+), 7 deletions(-)
> 
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 8600bd029acf..106149690366 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1266,31 +1266,44 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>   * This means the inc-and-test can be bypassed.
>   * The folio does not have to be locked.
>   *
> - * If the folio is large, it is accounted as a THP.  As the folio
> + * If the folio is pmd-mappable, it is accounted as a THP.  As the folio
>   * is new, it's assumed to be mapped exclusively by a single process.
>   */
>  void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>  		unsigned long address)
>  {
> -	int nr;
> +	int nr = folio_nr_pages(folio);
>  
> -	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
> +	VM_BUG_ON_VMA(address < vma->vm_start ||
> +			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
>  	__folio_set_swapbacked(folio);
>  
> -	if (likely(!folio_test_pmd_mappable(folio))) {
> +	if (likely(!folio_test_large(folio))) {
>  		/* increment count (starts at -1) */
>  		atomic_set(&folio->_mapcount, 0);
> -		nr = 1;
> +		__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
> +	} else if (!folio_test_pmd_mappable(folio)) {
> +		int i;
> +
> +		for (i = 0; i < nr; i++) {
> +			struct page *page = folio_page(folio, i);
> +
> +			/* increment count (starts at -1) */
> +			atomic_set(&page->_mapcount, 0);
> +			__page_set_anon_rmap(folio, page, vma,
> +					address + (i << PAGE_SHIFT), 1);
> +		}
> +
> +		atomic_set(&folio->_nr_pages_mapped, nr);

This code should work for the !folio_test_large() case too, no?

>  	} else {
>  		/* increment count (starts at -1) */
>  		atomic_set(&folio->_entire_mapcount, 0);
>  		atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
> -		nr = folio_nr_pages(folio);
> +		__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>  		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
>  	}
>  
>  	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
> -	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>  }
>  
>  /**
> -- 
> 2.25.1
>
Ryan Roberts Sept. 29, 2023, 2:39 p.m. UTC | #2
On 29/09/2023 14:45, Kirill A. Shutemov wrote:
> On Fri, Sep 29, 2023 at 12:44:13PM +0100, Ryan Roberts wrote:
>> In preparation for anonymous large folio support, improve
>> folio_add_new_anon_rmap() to allow a non-pmd-mappable, large folio to be
>> passed to it. In this case, all contained pages are accounted using the
>> order-0 folio (or base page) scheme.
>>
>> Reviewed-by: Yu Zhao <yuzhao@google.com>
>> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
>> Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
>> ---
>>  mm/rmap.c | 27 ++++++++++++++++++++-------
>>  1 file changed, 20 insertions(+), 7 deletions(-)
>>
>> diff --git a/mm/rmap.c b/mm/rmap.c
>> index 8600bd029acf..106149690366 100644
>> --- a/mm/rmap.c
>> +++ b/mm/rmap.c
>> @@ -1266,31 +1266,44 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
>>   * This means the inc-and-test can be bypassed.
>>   * The folio does not have to be locked.
>>   *
>> - * If the folio is large, it is accounted as a THP.  As the folio
>> + * If the folio is pmd-mappable, it is accounted as a THP.  As the folio
>>   * is new, it's assumed to be mapped exclusively by a single process.
>>   */
>>  void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
>>  		unsigned long address)
>>  {
>> -	int nr;
>> +	int nr = folio_nr_pages(folio);
>>  
>> -	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
>> +	VM_BUG_ON_VMA(address < vma->vm_start ||
>> +			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
>>  	__folio_set_swapbacked(folio);
>>  
>> -	if (likely(!folio_test_pmd_mappable(folio))) {
>> +	if (likely(!folio_test_large(folio))) {
>>  		/* increment count (starts at -1) */
>>  		atomic_set(&folio->_mapcount, 0);
>> -		nr = 1;
>> +		__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>> +	} else if (!folio_test_pmd_mappable(folio)) {
>> +		int i;
>> +
>> +		for (i = 0; i < nr; i++) {
>> +			struct page *page = folio_page(folio, i);
>> +
>> +			/* increment count (starts at -1) */
>> +			atomic_set(&page->_mapcount, 0);
>> +			__page_set_anon_rmap(folio, page, vma,
>> +					address + (i << PAGE_SHIFT), 1);
>> +		}
>> +
>> +		atomic_set(&folio->_nr_pages_mapped, nr);
> 
> This code should work for the !folio_test_large() case too, no?

Not quite; for !folio_test_large() we don't set _nr_pages_mapped - that's a
compound-only field that lives in the second struct page. So I could make
most of this common, but I would still need a conditional around that last
line, and at that point it seemed better to split the cases as I have done,
to avoid the loop overhead for the !large case. (A sketch of the field
layout follows at the end of this reply.)

> 
>>  	} else {
>>  		/* increment count (starts at -1) */
>>  		atomic_set(&folio->_entire_mapcount, 0);
>>  		atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
>> -		nr = folio_nr_pages(folio);
>> +		__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>>  		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
>>  	}
>>  
>>  	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
>> -	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
>>  }
>>  
>>  /**
>> -- 
>> 2.25.1
>>
>
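
To illustrate the point above: the folio-level mapcount fields overlay the
tail struct pages of a compound allocation, so an order-0 folio has nowhere
to store them. Below is a hand-simplified model (the real kernel expresses
this with unions inside struct folio; the field names are real, the layout
is approximate):

#include <assert.h>
#include <stdatomic.h>

/* Each struct page in the memmap can be viewed two ways: as an ordinary
 * page (per-page _mapcount), or - only for page[1] of a compound page -
 * as the home of the compound-only counters. */
struct page_model {
	union {
		atomic_int _mapcount;	/* any order-0 page */
		struct {		/* page[1] of a compound page only */
			atomic_int _entire_mapcount;
			atomic_int _nr_pages_mapped;
		} compound;
	};
};

int main(void)
{
	struct page_model pages[4] = { 0 };

	/* A large folio spans pages[0..3]; its compound-only state lives
	 * in pages[1].  An order-0 folio owns only pages[0], so writing
	 * _nr_pages_mapped would scribble on an unrelated struct page -
	 * hence the separate !folio_test_large() path in the patch. */
	atomic_store(&pages[0]._mapcount, 0);
	atomic_store(&pages[1].compound._nr_pages_mapped, 4);
	assert(atomic_load(&pages[1].compound._nr_pages_mapped) == 4);
	return 0;
}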

Patch

diff --git a/mm/rmap.c b/mm/rmap.c
index 8600bd029acf..106149690366 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1266,31 +1266,44 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
  * This means the inc-and-test can be bypassed.
  * The folio does not have to be locked.
  *
- * If the folio is large, it is accounted as a THP.  As the folio
+ * If the folio is pmd-mappable, it is accounted as a THP.  As the folio
  * is new, it's assumed to be mapped exclusively by a single process.
  */
 void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long address)
 {
-	int nr;
+	int nr = folio_nr_pages(folio);
 
-	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
+	VM_BUG_ON_VMA(address < vma->vm_start ||
+			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
 	__folio_set_swapbacked(folio);
 
-	if (likely(!folio_test_pmd_mappable(folio))) {
+	if (likely(!folio_test_large(folio))) {
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_mapcount, 0);
-		nr = 1;
+		__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
+	} else if (!folio_test_pmd_mappable(folio)) {
+		int i;
+
+		for (i = 0; i < nr; i++) {
+			struct page *page = folio_page(folio, i);
+
+			/* increment count (starts at -1) */
+			atomic_set(&page->_mapcount, 0);
+			__page_set_anon_rmap(folio, page, vma,
+					address + (i << PAGE_SHIFT), 1);
+		}
+
+		atomic_set(&folio->_nr_pages_mapped, nr);
 	} else {
 		/* increment count (starts at -1) */
 		atomic_set(&folio->_entire_mapcount, 0);
 		atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
-		nr = folio_nr_pages(folio);
+		__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 		__lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
 	}
 
 	__lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
-	__page_set_anon_rmap(folio, &folio->page, vma, address, 1);
 }
 
 /**
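
For context, a caller of the updated function might look roughly like the
fragment below. This is a hypothetical sketch, not part of this series, and
it is not compilable standalone: vma_alloc_folio() and folio_add_lru_vma()
are kernel APIs of this era, but zeroing, error handling and PTE
installation are elided.

/* Hypothetical sketch: allocate a new anonymous folio of the given order
 * and add it to the rmap via the per-page path added by this patch. */
static struct folio *alloc_and_map_anon_folio(struct vm_area_struct *vma,
					      unsigned long addr, int order)
{
	struct folio *folio;

	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, order, vma, addr, true);
	if (!folio)
		return NULL;

	/* addr must be the first address covered by the folio; see the
	 * VM_BUG_ON_VMA() bounds check added above. */
	folio_add_new_anon_rmap(folio, vma, addr);
	folio_add_lru_vma(folio, vma);

	/* PTE installation for all folio_nr_pages() pages would follow. */
	return folio;
}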