Message ID | 20231211155652.131054-7-david@redhat.com |
---|---|
State | New |
Series | mm/rmap: interface overhaul |
On 11/12/2023 15:56, David Hildenbrand wrote:
> Let's make sure we end up with the right folios in the right functions.
>
> Signed-off-by: David Hildenbrand <david@redhat.com>

Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>

> ---
> include/linux/rmap.h | 7 +++++++
> mm/rmap.c            | 6 ++++++
> 2 files changed, 13 insertions(+)
On 11.12.23 16:56, David Hildenbrand wrote:
> Let's make sure we end up with the right folios in the right functions.
>
> Signed-off-by: David Hildenbrand <david@redhat.com>
> ---

I'll move all !anon handling to the relevant patches, so for this patch we'll
only end up adding sanity checks for the "add" and "add_new" variants.
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 4c0650e9f6db..e3857d26b944 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -217,6 +217,7 @@ void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
 		struct vm_area_struct *vma)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
 	if (PageAnonExclusive(&folio->page)) {
@@ -231,6 +232,7 @@ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
 /* See page_try_share_anon_rmap() */
 static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 	VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio);
 
@@ -253,6 +255,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio)
 
 static inline void hugetlb_add_file_rmap(struct folio *folio)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
 	atomic_inc(&folio->_entire_mapcount);
@@ -260,11 +263,15 @@ static inline void hugetlb_add_file_rmap(struct folio *folio)
 
 static inline void hugetlb_remove_rmap(struct folio *folio)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
 	atomic_dec(&folio->_entire_mapcount);
 }
 
 static inline void __page_dup_rmap(struct page *page, bool compound)
 {
+	VM_WARN_ON(folio_test_hugetlb(page_folio(page)));
+
 	if (compound) {
 		struct folio *folio = (struct folio *)page;
 
diff --git a/mm/rmap.c b/mm/rmap.c
index e210ac1b73de..41597da14f26 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1343,6 +1343,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 {
 	int nr = folio_nr_pages(folio);
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start ||
 			address + (nr << PAGE_SHIFT) > vma->vm_end, vma);
 	__folio_set_swapbacked(folio);
@@ -1395,6 +1396,7 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page,
 	unsigned int nr_pmdmapped = 0, first;
 	int nr = 0;
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
 
 	/* Is page being mapped by PTE? Is this its first map to be added? */
@@ -1480,6 +1482,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	bool last;
 	enum node_stat_item idx;
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
@@ -2632,6 +2635,7 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
 	atomic_inc(&folio->_entire_mapcount);
@@ -2644,6 +2648,8 @@ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 void hugetlb_add_new_anon_rmap(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long address)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
Let's make sure we end up with the right folios in the right functions.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/rmap.h | 7 +++++++
 mm/rmap.c            | 6 ++++++
 2 files changed, 13 insertions(+)
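For readers outside mm/: the pattern above is simply that every hugetlb-specific rmap entry point now asserts it was handed a hugetlb folio, while the generic entry points assert the opposite, so a misrouted folio fires a warning right at the API boundary instead of silently corrupting mapcounts later. Below is a small standalone userspace sketch of that pattern; the struct, the macro body, and the function internals are simplified stand-ins for illustration, not the kernel's real definitions.

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace sketch (not kernel code) of the sanity-check pattern:
 * hugetlb-only entry points assert they got a hugetlb folio, generic
 * entry points assert the opposite.
 */

struct folio {
	bool hugetlb;		/* stand-in for the hugetlb folio flag */
	int entire_mapcount;	/* stand-in for folio->_entire_mapcount */
};

/* Simplified stand-in for VM_WARN_ON_FOLIO(): report, but keep going. */
#define VM_WARN_ON_FOLIO(cond, folio)					\
	do {								\
		if (cond)						\
			fprintf(stderr, "warning: %s (folio %p)\n",	\
				#cond, (void *)(folio));		\
	} while (0)

static bool folio_test_hugetlb(const struct folio *folio)
{
	return folio->hugetlb;
}

/* hugetlb-only entry point: must only ever see hugetlb folios. */
static void hugetlb_remove_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

	folio->entire_mapcount--;
}

/* Generic entry point: must never see hugetlb folios. */
static void folio_add_new_anon_rmap(struct folio *folio)
{
	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
	/* ... the normal anon rmap setup would follow here ... */
}

int main(void)
{
	struct folio huge = { .hugetlb = true, .entire_mapcount = 1 };
	struct folio order0 = { .hugetlb = false };

	hugetlb_remove_rmap(&huge);		/* correct usage: silent */
	folio_add_new_anon_rmap(&order0);	/* correct usage: silent */
	folio_add_new_anon_rmap(&huge);		/* wrong folio type: warns */
	return 0;
}

Under CONFIG_DEBUG_VM the real VM_WARN_ON_FOLIO() additionally dumps the offending folio with a backtrace (and it compiles away entirely without that option); the mock above only prints, which is enough to show the third call being flagged.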