--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -212,6 +212,7 @@ void hugetlb_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
 static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
 		struct vm_area_struct *vma)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
 	if (PageAnonExclusive(&folio->page)) {
@@ -225,6 +226,7 @@ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
 
 static inline void hugetlb_add_file_rmap(struct folio *folio)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);
 
 	atomic_inc(&folio->_entire_mapcount);
@@ -232,11 +234,15 @@ static inline void hugetlb_add_file_rmap(struct folio *folio)
 
 static inline void hugetlb_remove_rmap(struct folio *folio)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
 	atomic_dec(&folio->_entire_mapcount);
 }
 
 static inline void __page_dup_rmap(struct page *page, bool compound)
 {
+	VM_WARN_ON(folio_test_hugetlb(page_folio(page)));
+
 	if (compound) {
 		struct folio *folio = (struct folio *)page;
 
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1313,6 +1313,7 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 {
 	int nr;
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
 	__folio_set_swapbacked(folio);
 
@@ -1353,6 +1354,7 @@ void folio_add_file_rmap_range(struct folio *folio, struct page *page,
 	unsigned int nr_pmdmapped = 0, first;
 	int nr = 0;
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(compound && !folio_test_pmd_mappable(folio), folio);
 
 	/* Is page being mapped by PTE? Is this its first map to be added? */
@@ -1438,6 +1440,7 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma,
 	bool last;
 	enum node_stat_item idx;
 
+	VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio);
 	VM_BUG_ON_PAGE(compound && !PageHead(page), page);
 
 	/* Is page being unmapped by PTE? Is this its last map to be removed? */
@@ -2585,6 +2588,7 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
 void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 		unsigned long address, rmap_t flags)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 	VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
 
 	atomic_inc(&folio->_entire_mapcount);
@@ -2597,6 +2601,8 @@ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
 void hugetlb_add_new_anon_rmap(struct folio *folio,
 		struct vm_area_struct *vma, unsigned long address)
 {
+	VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
+
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	/* increment count (starts at -1) */
 	atomic_set(&folio->_entire_mapcount, 0);
Let's make sure we end up with the right folios in the right functions.

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/rmap.h | 6 ++++++
 mm/rmap.c            | 6 ++++++
 2 files changed, 12 insertions(+)
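
The intent of the checks can be illustrated with a small standalone userspace sketch: the hugetlb rmap functions only ever touch _entire_mapcount (as the hunks above show), while the generic rmap functions do per-page accounting, so a folio handed to the wrong function family should warn loudly instead of being miscounted silently. All names below (toy_folio, TOY_WARN_ON_FOLIO, toy_*) are made up for illustration; this is not the kernel's folio type, VM_WARN_ON_FOLIO macro, or rmap API.

/*
 * Illustrative userspace sketch only: toy stand-ins for the idea of
 * "warn when a folio reaches the wrong rmap function family".
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_folio {
	bool hugetlb;		/* stand-in for folio_test_hugetlb() */
	int entire_mapcount;	/* hugetlb folios: one mapcount for the whole folio */
	int page_mapcount;	/* generic folios: per-page accounting */
};

/* Warn (but keep going) when the folio type does not match the caller. */
#define TOY_WARN_ON_FOLIO(cond, folio)					\
	do {								\
		if (cond)						\
			fprintf(stderr, "warn: %s (folio %p)\n",	\
				#cond, (void *)(folio));		\
	} while (0)

/* hugetlb family: must only ever see hugetlb folios */
static void toy_hugetlb_add_file_rmap(struct toy_folio *folio)
{
	TOY_WARN_ON_FOLIO(!folio->hugetlb, folio);
	folio->entire_mapcount++;
}

/* generic family: must never see a hugetlb folio */
static void toy_page_remove_rmap(struct toy_folio *folio)
{
	TOY_WARN_ON_FOLIO(folio->hugetlb, folio);
	folio->page_mapcount--;
}

int main(void)
{
	struct toy_folio huge = { .hugetlb = true };

	toy_hugetlb_add_file_rmap(&huge);	/* right family: silent */
	toy_page_remove_rmap(&huge);		/* wrong family: warning fires */
	return 0;
}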