
[v4,3/5] rmap: cleanup exit path of try_to_unmap_one_page()

Message ID 20230313124526.1207490-4-fengwei.yin@intel.com
State New
Series batched remove rmap in try_to_unmap_one()

Commit Message

Yin Fengwei March 13, 2023, 12:45 p.m. UTC
Clean up the exit path of try_to_unmap_one_page() by removing
duplicated code.
Move page_vma_mapped_walk_done() back to try_to_unmap_one().
Rename the local variable subpage to page, as a folio has no
concept of a subpage.

Signed-off-by: Yin Fengwei <fengwei.yin@intel.com>
---
 mm/rmap.c | 72 ++++++++++++++++++++++---------------------------------
 1 file changed, 29 insertions(+), 43 deletions(-)
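[Editor's note] The consolidation below replaces the per-branch restore and
cleanup sequences with two shared labels (exit_restore_pte and exit) and moves
the rss accounting that every successful branch performed into the single
discard: fall-through. The following is a minimal, self-contained userspace
sketch of that error-exit shape, not kernel code: the struct, the step_*()
helpers and the field names are purely illustrative stand-ins.

	/*
	 * Illustrative only, not kernel code: the error-exit shape the patch
	 * converges on.  Failures that must undo the cleared entry jump to one
	 * restore label, a failure with nothing to undo jumps to a bare exit
	 * label, and the success path falls through to a single accounting point.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct state {
		int saved;	/* stands in for the saved entry (pteval) */
		int current;	/* stands in for the live page table entry */
		int counter;	/* stands in for the per-mm rss counter */
	};

	/* Hypothetical stand-ins for the checks done on the real unmap path. */
	static bool consistency_check(void) { return true; }
	static bool step_a(void) { return true; }	/* a swap_duplicate()-like step */
	static bool step_b(void) { return false; }	/* an arch_unmap_one()-like step */

	static bool unmap_one(struct state *s)
	{
		s->saved = s->current;
		s->current = 0;				/* "nuke" the entry */

		if (!consistency_check())
			goto exit;			/* nothing to undo on this path */
		if (!step_a())
			goto exit_restore;		/* must put the old entry back */
		if (!step_b())
			goto exit_restore;

		/* Success: the bookkeeping every successful branch shares. */
		s->counter--;
		return true;

	exit_restore:
		s->current = s->saved;			/* like set_pte_at(..., pteval) */
	exit:
		return false;
	}

	int main(void)
	{
		struct state s = { .current = 42, .counter = 1 };

		if (!unmap_one(&s))
			printf("failed, entry restored to %d, counter still %d\n",
			       s.current, s.counter);
		return 0;
	}

Keeping one restore point means a new failure case only needs a goto, and the
success-path accounting cannot be duplicated or forgotten in individual
branches.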

Patch

diff --git a/mm/rmap.c b/mm/rmap.c
index 23eda671447a..72fc8c559cd9 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1543,15 +1543,13 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		struct page_vma_mapped_walk pvmw, unsigned long address,
 		enum ttu_flags flags)
 {
-	bool anon_exclusive, ret = true;
-	struct page *subpage;
+	bool anon_exclusive;
+	struct page *page;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t pteval;
 
-	subpage = folio_page(folio,
-			pte_pfn(*pvmw.pte) - folio_pfn(folio));
-	anon_exclusive = folio_test_anon(folio) &&
-		PageAnonExclusive(subpage);
+	page = folio_page(folio, pte_pfn(*pvmw.pte) - folio_pfn(folio));
+	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
 
 	flush_cache_page(vma, address, pte_pfn(*pvmw.pte));
 	/* Nuke the page table entry. */
@@ -1579,15 +1577,14 @@  static bool try_to_unmap_one_page(struct folio *folio,
 	pte_install_uffd_wp_if_needed(vma, address, pvmw.pte, pteval);
 
 	/* Set the dirty flag on the folio now the pte is gone. */
-	if (pte_dirty(pteval))
+	if (pte_dirty(pteval) && !folio_test_dirty(folio))
 		folio_mark_dirty(folio);
 
 	/* Update high watermark before we lower rss */
 	update_hiwater_rss(mm);
 
-	if (PageHWPoison(subpage) && !(flags & TTU_HWPOISON)) {
-		pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
-		dec_mm_counter(mm, mm_counter(&folio->page));
+	if (PageHWPoison(page) && !(flags & TTU_HWPOISON)) {
+		pteval = swp_entry_to_pte(make_hwpoison_entry(page));
 		set_pte_at(mm, address, pvmw.pte, pteval);
 	} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
 		/*
@@ -1600,12 +1597,11 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		 * migration) will not expect userfaults on already
 		 * copied pages.
 		 */
-		dec_mm_counter(mm, mm_counter(&folio->page));
 		/* We have to invalidate as we cleared the pte */
 		mmu_notifier_invalidate_range(mm, address,
 				address + PAGE_SIZE);
 	} else if (folio_test_anon(folio)) {
-		swp_entry_t entry = { .val = page_private(subpage) };
+		swp_entry_t entry = { .val = page_private(page) };
 		pte_t swp_pte;
 		/*
 		 * Store the swap location in the pte.
@@ -1614,12 +1610,10 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		if (unlikely(folio_test_swapbacked(folio) !=
 					folio_test_swapcache(folio))) {
 			WARN_ON_ONCE(1);
-			ret = false;
 			/* We have to invalidate as we cleared the pte */
 			mmu_notifier_invalidate_range(mm, address,
 					address + PAGE_SIZE);
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit;
 		}
 
 		/* MADV_FREE page check */
@@ -1651,7 +1645,6 @@  static bool try_to_unmap_one_page(struct folio *folio,
 				/* Invalidate as we cleared the pte */
 				mmu_notifier_invalidate_range(mm,
 						address, address + PAGE_SIZE);
-				dec_mm_counter(mm, MM_ANONPAGES);
 				goto discard;
 			}
 
@@ -1659,43 +1652,30 @@  static bool try_to_unmap_one_page(struct folio *folio,
 			 * If the folio was redirtied, it cannot be
 			 * discarded. Remap the page to page table.
 			 */
-			set_pte_at(mm, address, pvmw.pte, pteval);
 			folio_set_swapbacked(folio);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
 
-		if (swap_duplicate(entry) < 0) {
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
-		}
+		if (swap_duplicate(entry) < 0)
+			goto exit_restore_pte;
+
 		if (arch_unmap_one(mm, vma, address, pteval) < 0) {
 			swap_free(entry);
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
 
 		/* See page_try_share_anon_rmap(): clear PTE first. */
-		if (anon_exclusive &&
-				page_try_share_anon_rmap(subpage)) {
+		if (anon_exclusive && page_try_share_anon_rmap(page)) {
 			swap_free(entry);
-			set_pte_at(mm, address, pvmw.pte, pteval);
-			ret = false;
-			page_vma_mapped_walk_done(&pvmw);
-			goto discard;
+			goto exit_restore_pte;
 		}
+
 		if (list_empty(&mm->mmlist)) {
 			spin_lock(&mmlist_lock);
 			if (list_empty(&mm->mmlist))
 				list_add(&mm->mmlist, &init_mm.mmlist);
 			spin_unlock(&mmlist_lock);
 		}
-		dec_mm_counter(mm, MM_ANONPAGES);
 		inc_mm_counter(mm, MM_SWAPENTS);
 		swp_pte = swp_entry_to_pte(entry);
 		if (anon_exclusive)
@@ -1706,8 +1686,7 @@  static bool try_to_unmap_one_page(struct folio *folio,
 			swp_pte = pte_swp_mkuffd_wp(swp_pte);
 		set_pte_at(mm, address, pvmw.pte, swp_pte);
 		/* Invalidate as we cleared the pte */
-		mmu_notifier_invalidate_range(mm, address,
-				address + PAGE_SIZE);
+		mmu_notifier_invalidate_range(mm, address, address + PAGE_SIZE);
 	} else {
 		/*
 		 * This is a locked file-backed folio,
@@ -1720,11 +1699,16 @@  static bool try_to_unmap_one_page(struct folio *folio,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		dec_mm_counter(mm, mm_counter_file(&folio->page));
 	}
 
 discard:
-	return ret;
+	dec_mm_counter(vma->vm_mm, mm_counter(&folio->page));
+	return true;
+
+exit_restore_pte:
+	set_pte_at(mm, address, pvmw.pte, pteval);
+exit:
+	return false;
 }
 
 /*
@@ -1802,8 +1786,10 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 					pte_pfn(*pvmw.pte) - folio_pfn(folio));
 		ret = try_to_unmap_one_page(folio, vma,
 						range, pvmw, address, flags);
-		if (!ret)
+		if (!ret) {
+			page_vma_mapped_walk_done(&pvmw);
 			break;
+		}
 
 		/*
 		 * No need to call mmu_notifier_invalidate_range() it has be
@@ -1812,7 +1798,7 @@  static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		 *
 		 * See Documentation/mm/mmu_notifier.rst
 		 */
-		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
+		page_remove_rmap(subpage, vma, false);
 		if (vma->vm_flags & VM_LOCKED)
 			mlock_drain_local();
 		folio_put(folio);