| Message ID | 20240508224040.190469-7-21cnbao@gmail.com (mailing list archive) |
|---|---|
| State | New |
| Series | large folios swap-in: handle refault cases first |
```
Barry Song <21cnbao@gmail.com> writes:

> From: Chuanhua Han <hanchuanhua@oppo.com>
>
> When a large folio is found in the swapcache, the current implementation
> requires calling do_swap_page() nr_pages times, resulting in nr_pages
> page faults. This patch opts to map the entire large folio at once to
> minimize page faults. Additionally, redundant checks and early exits
> for ARM64 MTE restoring are removed.
>
> Signed-off-by: Chuanhua Han <hanchuanhua@oppo.com>
> Co-developed-by: Barry Song <v-songbaohua@oppo.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

LGTM, Thanks! Feel free to add

Reviewed-by: "Huang, Ying" <ying.huang@intel.com>

in the future version.

> ---
>  mm/memory.c | 59 +++++++++++++++++++++++++++++++++++++++++++----------
>  1 file changed, 48 insertions(+), 11 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index d9434df24d62..8b9e4cab93ed 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          pte_t pte;
>          vm_fault_t ret = 0;
>          void *shadow = NULL;
> +        int nr_pages;
> +        unsigned long page_idx;
> +        unsigned long address;
> +        pte_t *ptep;
>
>          if (!pte_unmap_same(vmf))
>                  goto out;
> @@ -4166,6 +4170,38 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>                  goto out_nomap;
>          }
>
> +        nr_pages = 1;
> +        page_idx = 0;
> +        address = vmf->address;
> +        ptep = vmf->pte;
> +        if (folio_test_large(folio) && folio_test_swapcache(folio)) {
> +                int nr = folio_nr_pages(folio);
> +                unsigned long idx = folio_page_idx(folio, page);
> +                unsigned long folio_start = address - idx * PAGE_SIZE;
> +                unsigned long folio_end = folio_start + nr * PAGE_SIZE;
> +                pte_t *folio_ptep;
> +                pte_t folio_pte;
> +
> +                if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
> +                        goto check_folio;
> +                if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
> +                        goto check_folio;
> +
> +                folio_ptep = vmf->pte - idx;
> +                folio_pte = ptep_get(folio_ptep);
> +                if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
> +                    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
> +                        goto check_folio;
> +
> +                page_idx = idx;
> +                address = folio_start;
> +                ptep = folio_ptep;
> +                nr_pages = nr;
> +                entry = folio->swap;
> +                page = &folio->page;
> +        }
> +
> +check_folio:
>          /*
>           * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
>           * must never point at an anonymous page in the swapcache that is
> @@ -4225,12 +4261,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>           * We're already holding a reference on the page but haven't mapped it
>           * yet.
>           */
> -        swap_free(entry);
> +        swap_free_nr(entry, nr_pages);
>          if (should_try_to_free_swap(folio, vma, vmf->flags))
>                  folio_free_swap(folio);
>
> -        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
> -        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
> +        add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
> +        add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
>          pte = mk_pte(page, vma->vm_page_prot);
>
>          /*
> @@ -4247,27 +4283,28 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>                  }
>                  rmap_flags |= RMAP_EXCLUSIVE;
>          }
> -        flush_icache_page(vma, page);
> +        folio_ref_add(folio, nr_pages - 1);
> +        flush_icache_pages(vma, page, nr_pages);
>          if (pte_swp_soft_dirty(vmf->orig_pte))
>                  pte = pte_mksoft_dirty(pte);
>          if (pte_swp_uffd_wp(vmf->orig_pte))
>                  pte = pte_mkuffd_wp(pte);
> -        vmf->orig_pte = pte;
> +        vmf->orig_pte = pte_advance_pfn(pte, page_idx);
>
>          /* ksm created a completely new copy */
>          if (unlikely(folio != swapcache && swapcache)) {
> -                folio_add_new_anon_rmap(folio, vma, vmf->address);
> +                folio_add_new_anon_rmap(folio, vma, address);
>                  folio_add_lru_vma(folio, vma);
>          } else {
> -                folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
> +                folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
>                                          rmap_flags);
>          }
>
>          VM_BUG_ON(!folio_test_anon(folio) ||
>                          (pte_write(pte) && !PageAnonExclusive(page)));
> -        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
> -        arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
> -                        pte, vmf->orig_pte, 1);
> +        set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
> +        arch_do_swap_page_nr(vma->vm_mm, vma, address,
> +                        pte, pte, nr_pages);
>
>          folio_unlock(folio);
>          if (folio != swapcache && swapcache) {
> @@ -4291,7 +4328,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
>          }
>
>          /* No need to invalidate - it was non-present before */
> -        update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
> +        update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
>  unlock:
>          if (vmf->pte)
>                  pte_unmap_unlock(vmf->pte, vmf->ptl);

--
Best Regards,
Huang, Ying
```
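The commit message above hinges on one question: when a fault hits one page of a large folio that is already in the swapcache, may the whole folio be mapped in this single fault? The first precondition the patch checks is purely arithmetic: the folio's virtual range must lie entirely inside the VMA and inside the PMD-covered page table that vmf->pte belongs to. The sketch below is a minimal userspace illustration of that bound check; PAGE_SIZE, PMD_SIZE, the simplified pmd_addr_end() stand-in and the sample addresses are assumptions made for the example, not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PMD_SIZE  (512 * PAGE_SIZE)          /* 2 MiB with 4 KiB pages (assumed) */
#define PMD_MASK  (~(PMD_SIZE - 1))

/* Simplified stand-in for the kernel's pmd_addr_end(). */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
        unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

        return boundary < end ? boundary : end;
}

/*
 * Mirror of the patch's check: the folio of nr pages, whose page 'idx' is
 * being faulted at 'fault_addr', must lie fully inside both the VMA
 * [vm_start, vm_end) and the PMD covering fault_addr.
 */
static bool can_map_whole_folio(unsigned long fault_addr, unsigned long idx,
                                unsigned long nr, unsigned long vm_start,
                                unsigned long vm_end)
{
        unsigned long folio_start = fault_addr - idx * PAGE_SIZE;
        unsigned long folio_end = folio_start + nr * PAGE_SIZE;
        unsigned long low = fault_addr & PMD_MASK;

        if (low < vm_start)
                low = vm_start;
        if (folio_start < low)
                return false;   /* would spill below the VMA or PMD */
        if (folio_end > pmd_addr_end(fault_addr, vm_end))
                return false;   /* would spill past the VMA or PMD */
        return true;
}

int main(void)
{
        /* Hypothetical 32-page VMA; fault at VMA page 7, which is page 3 of a 16-page folio. */
        unsigned long vm_start = 0x7f0000000000UL;
        unsigned long vm_end = vm_start + 32 * PAGE_SIZE;
        unsigned long fault_addr = vm_start + 7 * PAGE_SIZE;

        printf("map whole folio: %s\n",
               can_map_whole_folio(fault_addr, 3, 16, vm_start, vm_end) ?
               "yes" : "no");
        return 0;
}
```

If either bound fails, the patch jumps to check_folio and maps only the single faulting page, which is exactly the pre-patch behaviour.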
```diff
diff --git a/mm/memory.c b/mm/memory.c
index d9434df24d62..8b9e4cab93ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3968,6 +3968,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
         pte_t pte;
         vm_fault_t ret = 0;
         void *shadow = NULL;
+        int nr_pages;
+        unsigned long page_idx;
+        unsigned long address;
+        pte_t *ptep;
 
         if (!pte_unmap_same(vmf))
                 goto out;
@@ -4166,6 +4170,38 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                 goto out_nomap;
         }
 
+        nr_pages = 1;
+        page_idx = 0;
+        address = vmf->address;
+        ptep = vmf->pte;
+        if (folio_test_large(folio) && folio_test_swapcache(folio)) {
+                int nr = folio_nr_pages(folio);
+                unsigned long idx = folio_page_idx(folio, page);
+                unsigned long folio_start = address - idx * PAGE_SIZE;
+                unsigned long folio_end = folio_start + nr * PAGE_SIZE;
+                pte_t *folio_ptep;
+                pte_t folio_pte;
+
+                if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
+                        goto check_folio;
+                if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
+                        goto check_folio;
+
+                folio_ptep = vmf->pte - idx;
+                folio_pte = ptep_get(folio_ptep);
+                if (!pte_same(folio_pte, pte_move_swp_offset(vmf->orig_pte, -idx)) ||
+                    swap_pte_batch(folio_ptep, nr, folio_pte) != nr)
+                        goto check_folio;
+
+                page_idx = idx;
+                address = folio_start;
+                ptep = folio_ptep;
+                nr_pages = nr;
+                entry = folio->swap;
+                page = &folio->page;
+        }
+
+check_folio:
         /*
          * PG_anon_exclusive reuses PG_mappedtodisk for anon pages. A swap pte
          * must never point at an anonymous page in the swapcache that is
@@ -4225,12 +4261,12 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
          * We're already holding a reference on the page but haven't mapped it
          * yet.
          */
-        swap_free(entry);
+        swap_free_nr(entry, nr_pages);
         if (should_try_to_free_swap(folio, vma, vmf->flags))
                 folio_free_swap(folio);
 
-        inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
-        dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
+        add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr_pages);
+        add_mm_counter(vma->vm_mm, MM_SWAPENTS, -nr_pages);
         pte = mk_pte(page, vma->vm_page_prot);
 
         /*
@@ -4247,27 +4283,28 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                 }
                 rmap_flags |= RMAP_EXCLUSIVE;
         }
-        flush_icache_page(vma, page);
+        folio_ref_add(folio, nr_pages - 1);
+        flush_icache_pages(vma, page, nr_pages);
         if (pte_swp_soft_dirty(vmf->orig_pte))
                 pte = pte_mksoft_dirty(pte);
         if (pte_swp_uffd_wp(vmf->orig_pte))
                 pte = pte_mkuffd_wp(pte);
-        vmf->orig_pte = pte;
+        vmf->orig_pte = pte_advance_pfn(pte, page_idx);
 
         /* ksm created a completely new copy */
         if (unlikely(folio != swapcache && swapcache)) {
-                folio_add_new_anon_rmap(folio, vma, vmf->address);
+                folio_add_new_anon_rmap(folio, vma, address);
                 folio_add_lru_vma(folio, vma);
         } else {
-                folio_add_anon_rmap_pte(folio, page, vma, vmf->address,
+                folio_add_anon_rmap_ptes(folio, page, nr_pages, vma, address,
                                         rmap_flags);
         }
 
         VM_BUG_ON(!folio_test_anon(folio) ||
                         (pte_write(pte) && !PageAnonExclusive(page)));
-        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
-        arch_do_swap_page_nr(vma->vm_mm, vma, vmf->address,
-                        pte, vmf->orig_pte, 1);
+        set_ptes(vma->vm_mm, address, ptep, pte, nr_pages);
+        arch_do_swap_page_nr(vma->vm_mm, vma, address,
+                        pte, pte, nr_pages);
 
         folio_unlock(folio);
         if (folio != swapcache && swapcache) {
@@ -4291,7 +4328,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
         }
 
         /* No need to invalidate - it was non-present before */
-        update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
+        update_mmu_cache_range(vmf, vma, address, ptep, nr_pages);
 unlock:
         if (vmf->pte)
                 pte_unmap_unlock(vmf->pte, vmf->ptl);
```
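The second precondition, visible in the second hunk of the diff above, is that all nr page-table slots still hold swap entries belonging to this folio: the faulting PTE's swap offset is moved back by idx with pte_move_swp_offset() and compared against the first slot, and swap_pte_batch() must report a run of nr matching entries, all re-checked under the page-table lock. The userspace sketch below models that consistency test; struct swap_slot and its fields are invented stand-ins for real swap PTEs.

```c
#include <stdbool.h>
#include <stdio.h>

/* Invented, simplified model of a page-table slot holding a swap entry. */
struct swap_slot {
        bool is_swap;           /* slot still holds a swap entry */
        unsigned int type;      /* swap device the entry points into */
        unsigned long offset;   /* offset of the page on that device */
};

/*
 * Analogue of the pte_move_swp_offset()/swap_pte_batch() test in the patch:
 * slots[0..nr-1] must all be swap entries on the same device with
 * consecutive offsets, i.e. they must still back one contiguous folio.
 */
static bool swap_slots_form_batch(const struct swap_slot *slots, int nr)
{
        for (int i = 0; i < nr; i++) {
                if (!slots[i].is_swap ||
                    slots[i].type != slots[0].type ||
                    slots[i].offset != slots[0].offset + (unsigned long)i)
                        return false;
        }
        return true;
}

int main(void)
{
        /* Four slots backing a 4-page folio that was swapped out contiguously. */
        struct swap_slot slots[4] = {
                { true, 0, 100 }, { true, 0, 101 },
                { true, 0, 102 }, { true, 0, 103 },
        };

        printf("batch of 4: %s\n", swap_slots_form_batch(slots, 4) ? "yes" : "no");

        /* Slot 2 no longer holds a swap entry: the run is broken. */
        slots[2].is_swap = false;
        printf("batch of 4: %s\n", swap_slots_form_batch(slots, 4) ? "yes" : "no");
        return 0;
}
```

If the run is broken, for example because a concurrent fault has already restored one of the pages, the code again falls back to mapping just the faulting page.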