| Message ID | 20240412025704.53245-2-wangkefeng.wang@huawei.com (mailing list archive) |
|---|---|
| State | New |
| Series | mm: batch mm counter updating in filemap_map_pages() |
On 2024/4/12 10:57, Kefeng Wang wrote:
> In order to support batch mm counter updating in filemap_map_pages(),
> move mm counter updating out of set_pte_range(), the folios are file
> from filemap, and distinguish folios type by vmf->flags and vma->vm_flags
> from another caller finish_fault().
>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>  mm/filemap.c | 4 ++++
>  mm/memory.c  | 8 +++++---
>  2 files changed, 9 insertions(+), 3 deletions(-)
>
> diff --git a/mm/filemap.c b/mm/filemap.c
> index 92e2d43e4c9d..04b813f0146c 100644
> --- a/mm/filemap.c
> +++ b/mm/filemap.c
> @@ -3540,6 +3540,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
>  skip:
>  	if (count) {
>  		set_pte_range(vmf, folio, page, count, addr);
> +		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
> +			       count);
>  		folio_ref_add(folio, count);
>  		if (in_range(vmf->address, addr, count * PAGE_SIZE))
>  			ret = VM_FAULT_NOPAGE;
> @@ -3554,6 +3556,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
>
>  	if (count) {
>  		set_pte_range(vmf, folio, page, count, addr);
> +		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
>  		folio_ref_add(folio, count);
>  		if (in_range(vmf->address, addr, count * PAGE_SIZE))
>  			ret = VM_FAULT_NOPAGE;
> @@ -3590,6 +3593,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
>  		ret = VM_FAULT_NOPAGE;
>
>  	set_pte_range(vmf, folio, page, 1, addr);
> +	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
>  	folio_ref_inc(folio);
>
>  	return ret;
> diff --git a/mm/memory.c b/mm/memory.c
> index 78422d1c7381..69bc63a5d6c8 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4685,12 +4685,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
>  		entry = pte_mkuffd_wp(entry);
>  	/* copy-on-write page */
>  	if (write && !(vma->vm_flags & VM_SHARED)) {
> -		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
>  		VM_BUG_ON_FOLIO(nr != 1, folio);
>  		folio_add_new_anon_rmap(folio, vma, addr);
>  		folio_add_lru_vma(folio, vma);
>  	} else {
> -		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
>  		folio_add_file_rmap_ptes(folio, page, nr, vma);
>  	}
>  	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
> @@ -4727,9 +4725,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
>  	struct vm_area_struct *vma = vmf->vma;
>  	struct page *page;
>  	vm_fault_t ret;
> +	int is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
> +		     !(vma->vm_flags & VM_SHARED);

oops, bool is enough.

>
>  	/* Did we COW the page? */
> -	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
> +	if (is_cow)
>  		page = vmf->cow_page;
>  	else
>  		page = vmf->page;
> @@ -4765,8 +4765,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
>  	/* Re-check under ptl */
>  	if (likely(!vmf_pte_changed(vmf))) {
>  		struct folio *folio = page_folio(page);
> +		int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
>
>  		set_pte_range(vmf, folio, page, 1, vmf->address);
> +		add_mm_counter(vma->vm_mm, type, 1);
>  		ret = 0;
>  	} else {
>  		update_mmu_tlb(vma, vmf->address, vmf->pte);
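The "oops" above presumably just means switching the new local to a bool. A minimal sketch of that correction (an assumption on my part, not taken from any posted revision):

/* Sketch only, folding in the "bool is enough" self-correction above. */
bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
	      !(vma->vm_flags & VM_SHARED);

The rest of the patch is unaffected by the type change; the later is_cow ? MM_ANONPAGES : mm_counter_file(folio) selection reads the same with a bool.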
diff --git a/mm/filemap.c b/mm/filemap.c
index 92e2d43e4c9d..04b813f0146c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3540,6 +3540,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
+			       count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3554,6 +3556,7 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3590,6 +3593,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 		ret = VM_FAULT_NOPAGE;
 
 	set_pte_range(vmf, folio, page, 1, addr);
+	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
 	folio_ref_inc(folio);
 
 	return ret;
diff --git a/mm/memory.c b/mm/memory.c
index 78422d1c7381..69bc63a5d6c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4685,12 +4685,10 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
 		entry = pte_mkuffd_wp(entry);
 	/* copy-on-write page */
 	if (write && !(vma->vm_flags & VM_SHARED)) {
-		add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
 		VM_BUG_ON_FOLIO(nr != 1, folio);
 		folio_add_new_anon_rmap(folio, vma, addr);
 		folio_add_lru_vma(folio, vma);
 	} else {
-		add_mm_counter(vma->vm_mm, mm_counter_file(folio), nr);
 		folio_add_file_rmap_ptes(folio, page, nr, vma);
 	}
 	set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
@@ -4727,9 +4725,11 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct page *page;
 	vm_fault_t ret;
+	int is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
+		     !(vma->vm_flags & VM_SHARED);
 
 	/* Did we COW the page? */
-	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
+	if (is_cow)
 		page = vmf->cow_page;
 	else
 		page = vmf->page;
@@ -4765,8 +4765,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 	/* Re-check under ptl */
 	if (likely(!vmf_pte_changed(vmf))) {
 		struct folio *folio = page_folio(page);
+		int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);
 
 		set_pte_range(vmf, folio, page, 1, vmf->address);
+		add_mm_counter(vma->vm_mm, type, 1);
 		ret = 0;
 	} else {
 		update_mmu_tlb(vma, vmf->address, vmf->pte);
In order to support batch mm counter updating in filemap_map_pages(), move the mm counter update out of set_pte_range(): the folios mapped there are file folios from the page cache, so the filemap callers can account them directly, while the other caller, finish_fault(), distinguishes the folio type (COW anonymous page vs. file page) from vmf->flags and vma->vm_flags.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 mm/filemap.c | 4 ++++
 mm/memory.c  | 8 +++++---
 2 files changed, 9 insertions(+), 3 deletions(-)
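To make the finish_fault() side of that description concrete, the counter-type selection condenses to the following (lifted from the mm/memory.c hunk above, with the author's own int-to-bool note folded in):

/*
 * Condensed from the finish_fault() hunk: a write fault on a private
 * mapping means the page was COWed, so it is charged as an anonymous
 * page; otherwise it is charged to the file or shmem counter returned
 * by mm_counter_file(folio).
 */
bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
	      !(vma->vm_flags & VM_SHARED);
int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);

add_mm_counter(vma->vm_mm, type, 1);

The filemap_map_pages() paths only ever see file folios, which is why they can call add_mm_counter(..., mm_counter_file(folio), count) directly and, per the series title, batch those updates in a later patch.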