@@ -1536,7 +1536,7 @@ static bool try_to_unmap_one_hugetlb(struct folio *folio,
*
* See Documentation/mm/mmu_notifier.rst
*/
- page_remove_rmap(&folio->page, vma, folio_test_hugetlb(folio));
+ page_remove_rmap(&folio->page, vma, true);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
@@ -1550,15 +1550,13 @@ static bool try_to_unmap_one_page(struct folio *folio,
struct page_vma_mapped_walk pvmw, unsigned long address,
enum ttu_flags flags)
{
- bool anon_exclusive, ret = true;
- struct page *subpage;
+ bool anon_exclusive;
+ struct page *page;
struct mm_struct *mm = vma->vm_mm;
pte_t pteval;
- subpage = folio_page(folio,
- pte_pfn(*pvmw.pte) - folio_pfn(folio));
- anon_exclusive = folio_test_anon(folio) &&
- PageAnonExclusive(subpage);
+ page = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio));
+ anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
/* Nuke the page table entry. */
@@ -1586,15 +1584,14 @@ static bool try_to_unmap_one_page(struct folio *folio,
pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
/* Set the dirty flag on the folio now the pte is gone. */
- if (pte_dirty(pteval))
+ if (pte_dirty(pteval) && !folio_test_dirty(folio))
folio_mark_dirty(folio);
/* Update high watermark before we lower rss */
update_hiwater_rss(mm);
- if (PageHWPoison(subpage) && !(flags & TTU_IGNORE_HWPOISON)) {
- pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
- dec_mm_counter(mm, mm_counter(&folio->page));
+ if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
+ pteval = swp_entry_to_pte(make_hwpoison_entry(page));
set_pte_at(mm, address, pvmw.pte, pteval);
} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
/*
@@ -1607,12 +1604,11 @@ static bool try_to_unmap_one_page(struct folio *folio,
* migration) will not expect userfaults on already
* copied pages.
*/
- dec_mm_counter(mm, mm_counter(&folio->page));
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
} else if (folio_test_anon(folio)) {
- swp_entry_t entry = { .val = page_private(subpage) };
+ swp_entry_t entry = { .val = page_private(page) };
pte_t swp_pte;
/*
* Store the swap location in the pte.
@@ -1621,12 +1617,10 @@ static bool try_to_unmap_one_page(struct folio *folio,
if (unlikely(folio_test_swapbacked(folio) !=
folio_test_swapcache(folio))) {
WARN_ON_ONCE(1);
- ret = false;
/* We have to invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm, address,
address + PAGE_SIZE);
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit;
}
/* MADV_FREE page check */
@@ -1658,7 +1652,6 @@ static bool try_to_unmap_one_page(struct folio *folio,
/* Invalidate as we cleared the pte */
mmu_notifier_invalidate_range(mm,
address, address + PAGE_SIZE);
- dec_mm_counter(mm, MM_ANONPAGES);
goto discard;
}
@@ -1666,43 +1659,30 @@ static bool try_to_unmap_one_page(struct folio *folio,
* If the folio was redirtied, it cannot be
* discarded. Remap the page to page table.
*/
- set_pte_at(mm, address, pvmw.pte, pteval);
folio_set_swapbacked(folio);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
- if (swap_duplicate(entry) < 0) {
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
- }
+ if (swap_duplicate(entry) < 0)
+ goto exit_restore_pte;
+
if (arch_unmap_one(mm, vma, address, pteval) < 0) {
swap_free(entry);
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
/* See page_try_share_anon_rmap(): clear PTE first. */
- if (anon_exclusive &&
- page_try_share_anon_rmap(subpage)) {
+ if (anon_exclusive && page_try_share_anon_rmap(page)) {
swap_free(entry);
- set_pte_at(mm, address, pvmw.pte, pteval);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- goto discard;
+ goto exit_restore_pte;
}
+
if (list_empty(&mm->mmlist)) {
spin_lock(&mmlist_lock);
if (list_empty(&mm->mmlist))
list_add(&mm->mmlist, &init_mm.mmlist);
spin_unlock(&mmlist_lock);
}
- dec_mm_counter(mm, MM_ANONPAGES);
inc_mm_counter(mm, MM_SWAPENTS);
swp_pte = swp_entry_to_pte(entry);
if (anon_exclusive)
@@ -1713,8 +1693,7 @@ static bool try_to_unmap_one_page(struct folio *folio,
swp_pte = pte_swp_mkuffd_wp(swp_pte);
set_pte_at(mm, address, pvmw.pte, swp_pte);
/* Invalidate as we cleared the pte */
- mmu_notifier_invalidate_range(mm, address,
- address + PAGE_SIZE);
+ mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
} else {
/*
* This is a locked file-backed folio,
@@ -1727,11 +1706,16 @@ static bool try_to_unmap_one_page(struct folio *folio,
*
* See Documentation/mm/mmu_notifier.rst
*/
- dec_mm_counter(mm, mm_counter_file(&folio->page));
}
discard:
- return ret;
+ dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
+ return true;
+
+exit_restore_pte:
+ set_pte_at(mm, address, pvmw.pte, pteval);
+exit:
+ return false;
}
/*
@@ -1809,8 +1793,10 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
pte_pfn(*pvmw.pte) - folio_pfn(folio));
ret = try_to_unmap_one_page(folio, vma,
range, pvmw, address, flags);
- if (!ret)
+ if (!ret) {
+ page_vma_mapped_walk_done(&pvmw);
break;
+ }
/*
* No need to call mmu_notifier_invalidate_range() it has be
@@ -1819,7 +1805,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
*
* See Documentation/mm/mmu_notifier.rst
*/
- page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+ page_remove_rmap(subpage, vma, false);
if (vma->vm_flags & VM_LOCKED)
mlock_drain_local();
folio_put(folio);
Clean up the exit path of try_to_unmap_one_page() by removing duplicated
code.
Move page_vma_mapped_walk_done() back to try_to_unmap_one().
Rename subpage to page, as a folio has no concept of a subpage.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 mm/rmap.c | 74 ++++++++++++++++++++++---------------------------------
 1 file changed, 30 insertions(+), 44 deletions(-)
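
Below is a minimal, self-contained sketch (not the kernel code; names such
as fake_state and unmap_one_sketch are invented for illustration) of the
shared goto-exit pattern the patch applies: every failure path that must
restore the cleared entry jumps to a single label, and the counter update
happens exactly once on the success path.

#include <stdbool.h>
#include <stdio.h>

struct fake_state {
	int live;	/* stands in for the live PTE */
	int counter;	/* stands in for the mm counter */
};

/* stand-ins for swap_duplicate()/arch_unmap_one() style checks */
static bool step_swap_dup(int old)   { return old != 1; }
static bool step_arch_unmap(int old) { return old != 2; }

static bool unmap_one_sketch(struct fake_state *s)
{
	int saved = s->live;	/* analogous to the saved pteval */

	s->live = 0;		/* "nuke" the entry up front */

	if (!step_swap_dup(saved))
		goto exit_restore;
	if (!step_arch_unmap(saved))
		goto exit_restore;

	/* like the single dec_mm_counter() at the discard: label */
	s->counter--;
	return true;

exit_restore:
	s->live = saved;	/* analogous to set_pte_at(..., pteval) */
	return false;
}

int main(void)
{
	struct fake_state s = { .live = 7, .counter = 1 };

	printf("unmapped=%d counter=%d\n", unmap_one_sketch(&s), s.counter);
	return 0;
}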