Message ID | 20231211155652.131054-3-david@redhat.com (mailing list archive) |
---|---|
State | New |
Headers | show |
Series | mm/rmap: interface overhaul | expand |
On 11/12/2023 15:56, David Hildenbrand wrote: > hugetlb rmap handling differs quite a lot from "ordinary" rmap code. > For example, hugetlb currently only supports entire mappings, and treats > any mapping as mapped using a single "logical PTE". Let's move it out > of the way so we can overhaul our "ordinary" rmap. > implementation/interface. > > Let's introduce and use hugetlb_remove_rmap() and remove the hugetlb > code from page_remove_rmap(). This effectively removes one check on the > small-folio path as well. > > Note: all possible candidates that need care are page_remove_rmap() that > pass compound=true. > > Reviewed-by: Yin Fengwei <fengwei.yin@intel.com> > Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Ryan Roberts <ryan.roberts@arm.com> > --- > include/linux/rmap.h | 5 +++++ > mm/hugetlb.c | 4 ++-- > mm/rmap.c | 17 ++++++++--------- > 3 files changed, 15 insertions(+), 11 deletions(-) > > diff --git a/include/linux/rmap.h b/include/linux/rmap.h > index 0bfea866f39b..d85bd1d4de04 100644 > --- a/include/linux/rmap.h > +++ b/include/linux/rmap.h > @@ -213,6 +213,11 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, > void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, > unsigned long address); > > +static inline void hugetlb_remove_rmap(struct folio *folio) > +{ > + atomic_dec(&folio->_entire_mapcount); > +} > + > static inline void __page_dup_rmap(struct page *page, bool compound) > { > if (compound) { > diff --git a/mm/hugetlb.c b/mm/hugetlb.c > index 305f3ca1dee6..ef48ae673890 100644 > --- a/mm/hugetlb.c > +++ b/mm/hugetlb.c > @@ -5676,7 +5676,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, > make_pte_marker(PTE_MARKER_UFFD_WP), > sz); > hugetlb_count_sub(pages_per_huge_page(h), mm); > - page_remove_rmap(page, vma, true); > + hugetlb_remove_rmap(page_folio(page)); > > spin_unlock(ptl); > tlb_remove_page_size(tlb, page, huge_page_size(h)); > @@ -5987,7 +5987,7 @@ 
static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, > > /* Break COW or unshare */ > huge_ptep_clear_flush(vma, haddr, ptep); > - page_remove_rmap(&old_folio->page, vma, true); > + hugetlb_remove_rmap(old_folio); > hugetlb_add_new_anon_rmap(new_folio, vma, haddr); > if (huge_pte_uffd_wp(pte)) > newpte = huge_pte_mkuffd_wp(newpte); > diff --git a/mm/rmap.c b/mm/rmap.c > index 80d42c31281a..4e60c1f38eaa 100644 > --- a/mm/rmap.c > +++ b/mm/rmap.c > @@ -1482,13 +1482,6 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, > > VM_BUG_ON_PAGE(compound && !PageHead(page), page); > > - /* Hugetlb pages are not counted in NR_*MAPPED */ > - if (unlikely(folio_test_hugetlb(folio))) { > - /* hugetlb pages are always mapped with pmds */ > - atomic_dec(&folio->_entire_mapcount); > - return; > - } > - > /* Is page being unmapped by PTE? Is this its last map to be removed? */ > if (likely(!compound)) { > last = atomic_add_negative(-1, &page->_mapcount); > @@ -1846,7 +1839,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, > dec_mm_counter(mm, mm_counter_file(&folio->page)); > } > discard: > - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); > + if (unlikely(folio_test_hugetlb(folio))) > + hugetlb_remove_rmap(folio); > + else > + page_remove_rmap(subpage, vma, false); > if (vma->vm_flags & VM_LOCKED) > mlock_drain_local(); > folio_put(folio); > @@ -2199,7 +2195,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, > */ > } > > - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); > + if (unlikely(folio_test_hugetlb(folio))) > + hugetlb_remove_rmap(folio); > + else > + page_remove_rmap(subpage, vma, false); > if (vma->vm_flags & VM_LOCKED) > mlock_drain_local(); > folio_put(folio);
On Mon, Dec 11, 2023 at 04:56:15PM +0100, David Hildenbrand wrote: > hugetlb rmap handling differs quite a lot from "ordinary" rmap code. > For example, hugetlb currently only supports entire mappings, and treats > any mapping as mapped using a single "logical PTE". Let's move it out > of the way so we can overhaul our "ordinary" rmap. > implementation/interface. > > Let's introduce and use hugetlb_remove_rmap() and remove the hugetlb > code from page_remove_rmap(). This effectively removes one check on the > small-folio path as well. > > Note: all possible candidates that need care are page_remove_rmap() that > pass compound=true. > > Reviewed-by: Yin Fengwei <fengwei.yin@intel.com> > Signed-off-by: David Hildenbrand <david@redhat.com> Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org> > +++ b/mm/rmap.c > @@ -1482,13 +1482,6 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, > > VM_BUG_ON_PAGE(compound && !PageHead(page), page); > > - /* Hugetlb pages are not counted in NR_*MAPPED */ > - if (unlikely(folio_test_hugetlb(folio))) { > - /* hugetlb pages are always mapped with pmds */ > - atomic_dec(&folio->_entire_mapcount); > - return; > - } Maybe add VM_BUG_ON_FOLIO(folio_test_hugetlb(folio), folio);
On 11.12.23 17:33, Matthew Wilcox wrote: > On Mon, Dec 11, 2023 at 04:56:15PM +0100, David Hildenbrand wrote: >> hugetlb rmap handling differs quite a lot from "ordinary" rmap code. >> For example, hugetlb currently only supports entire mappings, and treats >> any mapping as mapped using a single "logical PTE". Let's move it out >> of the way so we can overhaul our "ordinary" rmap. >> implementation/interface. >> >> Let's introduce and use hugetlb_remove_rmap() and remove the hugetlb >> code from page_remove_rmap(). This effectively removes one check on the >> small-folio path as well. >> >> Note: all possible candidates that need care are page_remove_rmap() that >> pass compound=true. >> >> Reviewed-by: Yin Fengwei <fengwei.yin@intel.com> >> Signed-off-by: David Hildenbrand <david@redhat.com> > > Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org> > >> +++ b/mm/rmap.c >> @@ -1482,13 +1482,6 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, >> >> VM_BUG_ON_PAGE(compound && !PageHead(page), page); >> >> - /* Hugetlb pages are not counted in NR_*MAPPED */ >> - if (unlikely(folio_test_hugetlb(folio))) { >> - /* hugetlb pages are always mapped with pmds */ >> - atomic_dec(&folio->_entire_mapcount); >> - return; >> - } > > Maybe add > VM_BUG_ON_FOLIO(folio_test_hugetlb(folio), folio); > I'll bulk-add that in patch #6. Thanks!
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 0bfea866f39b..d85bd1d4de04 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -213,6 +213,11 @@ void hugetlb_add_anon_rmap(struct folio *, struct vm_area_struct *, void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *, unsigned long address); +static inline void hugetlb_remove_rmap(struct folio *folio) +{ + atomic_dec(&folio->_entire_mapcount); +} + static inline void __page_dup_rmap(struct page *page, bool compound) { if (compound) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 305f3ca1dee6..ef48ae673890 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5676,7 +5676,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, make_pte_marker(PTE_MARKER_UFFD_WP), sz); hugetlb_count_sub(pages_per_huge_page(h), mm); - page_remove_rmap(page, vma, true); + hugetlb_remove_rmap(page_folio(page)); spin_unlock(ptl); tlb_remove_page_size(tlb, page, huge_page_size(h)); @@ -5987,7 +5987,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); - page_remove_rmap(&old_folio->page, vma, true); + hugetlb_remove_rmap(old_folio); hugetlb_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); diff --git a/mm/rmap.c b/mm/rmap.c index 80d42c31281a..4e60c1f38eaa 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1482,13 +1482,6 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma, VM_BUG_ON_PAGE(compound && !PageHead(page), page); - /* Hugetlb pages are not counted in NR_*MAPPED */ - if (unlikely(folio_test_hugetlb(folio))) { - /* hugetlb pages are always mapped with pmds */ - atomic_dec(&folio->_entire_mapcount); - return; - } - /* Is page being unmapped by PTE? Is this its last map to be removed? 
*/ if (likely(!compound)) { last = atomic_add_negative(-1, &page->_mapcount); @@ -1846,7 +1839,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, dec_mm_counter(mm, mm_counter_file(&folio->page)); } discard: - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); + if (unlikely(folio_test_hugetlb(folio))) + hugetlb_remove_rmap(folio); + else + page_remove_rmap(subpage, vma, false); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio); @@ -2199,7 +2195,10 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, */ } - page_remove_rmap(subpage, vma, folio_test_hugetlb(folio)); + if (unlikely(folio_test_hugetlb(folio))) + hugetlb_remove_rmap(folio); + else + page_remove_rmap(subpage, vma, false); if (vma->vm_flags & VM_LOCKED) mlock_drain_local(); folio_put(folio);