
[RFC,5/5] filemap: batched update mm counter,rmap when map file folio

Message ID 20230130125504.2509710-6-fengwei.yin@intel.com (mailing list archive)
State New
Series folio based filemap_map_pages()

Commit Message

Yin Fengwei Jan. 30, 2023, 12:55 p.m. UTC
Instead of updating the mm counter and rmap one page at a time, batching
the updates brings some performance gain.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 mm/filemap.c | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)
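
In outline, the patch accumulates a run of consecutively mappable pages and
flushes the rmap and mm counter updates once per run instead of once per page.
A minimal sketch of the pattern (the helper name here is invented purely for
illustration; the patch open-codes these two calls at each flush point):

static void flush_map_batch(struct vm_area_struct *vma, struct folio *folio,
			    struct page *first, int maplen)
{
	if (!maplen)
		return;
	/* One rmap update and one counter update cover the whole run. */
	page_add_file_rmap_range(folio, first, maplen, vma, false);
	add_mm_counter(vma->vm_mm, mm_counter_file(first), maplen);
}

Each time the mapping loop hits a page it cannot map (a poisoned page or an
already-populated PTE), the pending run is flushed and a new run starts after
that page.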

Comments

Matthew Wilcox Jan. 30, 2023, 2:14 p.m. UTC | #1
On Mon, Jan 30, 2023 at 08:55:04PM +0800, Yin Fengwei wrote:
> +++ b/mm/filemap.c
> @@ -3360,28 +3360,52 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
>  	struct vm_area_struct *vma = vmf->vma;
>  	struct file *file = vma->vm_file;
>  	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
> -	int ref_count = 0, count = 0;
> +	int ref_count = 0, count = 0, maplen = 0;
> +	struct page *pg = page;

OK, having read it through, I think you're making your life a lot harder
by keeping page pointers at all.  I'd pass xas.xa_index - folio->index
from filemap_map_pages() as a parameter called something like 'first'.
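
For illustration, the interface might then look roughly like this (a
hypothetical sketch, not code from the series):

/* 'first': offset of the first page to map, relative to the folio start. */
static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
		struct folio *folio, unsigned long first,
		unsigned long addr, unsigned int len);

/* In filemap_map_pages(), the caller would then do something like: */
ret |= filemap_map_folio_range(vmf, folio, xas.xa_index - folio->index,
		addr, len);

The helper would derive its starting page with folio_page(folio, first)
instead of carrying struct page pointers through the loop.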

>  	do {
> -		if (PageHWPoison(page))
> +		if (PageHWPoison(page)) {
> +			if (maplen) {
> +				page_add_file_rmap_range(folio, pg, maplen,
> +					vma, false);
> +				add_mm_counter(vma->vm_mm,
> +					mm_counter_file(pg), maplen);

Again you've made your life harder ;-)  Try something like this:

void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
{
	do_set_pte_range(vmf, page, addr, 1);
}

... and then here, you can do:

			if (maplen)
				do_set_pte_range(vmf, page - maplen - 1,
						base_addr, maplen);
			base_addr += (maplen + 1) * PAGE_SIZE;
			maplen = 0;
			continue;
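
For context, one way do_set_pte_range() could be shaped along these lines (a
rough sketch only; whether the PTE cursor and address are advanced here or by
the caller is one of the details a next version has to settle):

void do_set_pte_range(struct vm_fault *vmf, struct page *page,
		unsigned long addr, unsigned int nr)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned int i;

	/* One rmap update and one counter update for the whole run. */
	page_add_file_rmap_range(page_folio(page), page, nr, vma, false);
	add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);

	/* Install the page table entries one by one. */
	for (i = 0; i < nr; i++, vmf->pte++, addr += PAGE_SIZE)
		do_set_pte_entry(vmf, page + i, addr);
}
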
Yin Fengwei Jan. 31, 2023, 1:11 a.m. UTC | #2
On 1/30/2023 10:14 PM, Matthew Wilcox wrote:
> On Mon, Jan 30, 2023 at 08:55:04PM +0800, Yin Fengwei wrote:
>> +++ b/mm/filemap.c
>> @@ -3360,28 +3360,52 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
>>  	struct vm_area_struct *vma = vmf->vma;
>>  	struct file *file = vma->vm_file;
>>  	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
>> -	int ref_count = 0, count = 0;
>> +	int ref_count = 0, count = 0, maplen = 0;
>> +	struct page *pg = page;
> 
> OK, having read it through, I think you're making your life a lot harder
> by keeping page pointers at all.  I'd pass xas.xa_index - folio->index
> from filemap_map_pages() as a parameter called something like 'first'.
I used the page pointer here because other changes you made kept the page
pointer as a parameter, and I tried to align with that.

> 
>>  	do {
>> -		if (PageHWPoison(page))
>> +		if (PageHWPoison(page)) {
>> +			if (maplen) {
>> +				page_add_file_rmap_range(folio, pg, maplen,
>> +					vma, false);
>> +				add_mm_counter(vma->vm_mm,
>> +					mm_counter_file(pg), maplen);
> 
> Again you've made your life harder ;-)  Try something like this:
Yes. This is much better.

> 
> void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
> {
> 	do_set_pte_range(vmf, page, addr, 1);
> }
> 
> ... and then here, you can do:
> 
> 			if (maplen)
> 				do_set_pte_range(vmf, page - maplen - 1,
> 						base_addr, maplen);
> 			base_addr += (maplen + 1) * PAGE_SIZE;
> 			maplen = 0;
> 			continue;
Thanks a lot for all your comments. I will address them all in the next
version.

Regards
Yin, Fengwei


Patch

diff --git a/mm/filemap.c b/mm/filemap.c
index fe0c226c8b1e..6d9438490025 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3360,28 +3360,52 @@  static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	struct vm_area_struct *vma = vmf->vma;
 	struct file *file = vma->vm_file;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
-	int ref_count = 0, count = 0;
+	int ref_count = 0, count = 0, maplen = 0;
+	struct page *pg = page;
 
 	do {
-		if (PageHWPoison(page))
+		if (PageHWPoison(page)) {
+			if (maplen) {
+				page_add_file_rmap_range(folio, pg, maplen,
+					vma, false);
+				add_mm_counter(vma->vm_mm,
+					mm_counter_file(pg), maplen);
+			}
+			pg = page + 1;
+			maplen = 0;
 			continue;
+		}
 
 		if (mmap_miss > 0)
 			mmap_miss--;
 
-		if (!pte_none(*vmf->pte))
+		if (!pte_none(*vmf->pte)) {
+			if (maplen) {
+				page_add_file_rmap_range(folio, pg, maplen,
+					vma, false);
+				add_mm_counter(vma->vm_mm,
+					mm_counter_file(pg), maplen);
+			}
+			pg = page + 1;
+			maplen = 0;
 			continue;
+		}
 
 		if (vmf->address == addr)
 			ret = VM_FAULT_NOPAGE;
 
 		ref_count++;
+		maplen++;
 
-		do_set_pte(vmf, page, addr);
+		do_set_pte_entry(vmf, page, addr);
 		update_mmu_cache(vma, addr, vmf->pte);
 
 	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < len);
 
+	if (maplen) {
+		page_add_file_rmap_range(folio, pg, maplen, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(pg), maplen);
+	}
 	folio_ref_add(folio, ref_count);
 	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);