[RFC,v2,2/2] mm: khugepaged: recover from poisoned file-backed memory

Message ID 20220429000947.2172219-3-jiaqiyan@google.com (mailing list archive)
State New
Series Memory poison recovery in khugepaged collapsing

Commit Message

Jiaqi Yan April 29, 2022, 12:09 a.m. UTC
Make collapse_file() roll back when copying pages fails.
More concretely (see the sketch after this list):
* extract the copying operations into a separate loop
* postpone the update of nr_none until both scanning and
  copying have succeeded
* postpone joining the small xarray entries into a multi-index
  entry until both scanning and copying have succeeded
* postpone the NR_XXX_THPS statistics updates until both scanning
  and copying have succeeded
* for a non-SHMEM file, roll back filemap_nr_thps_inc if scanning
  succeeded but copying failed
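
A rough, user-space-only sketch of the reordered flow follows. It is
illustrative, not kernel code: fake_copy_mc(), nr_file_thps, collapse()
and the page arrays below are stand-ins for copy_highpage_mc() (added
in patch 1/2), the filemap THP counter, and the pages being collapsed.
Only the ordering mirrors this patch: copy first, commit counters and
the multi-index entry only after every copy succeeds, roll back
otherwise.

/* Illustrative stand-ins only; sizes are shrunk for the demo. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define NR_SUBPAGES 8           /* stands in for HPAGE_PMD_NR */
#define PAGE_BYTES  16          /* tiny "page" for the demo */

static char old_pages[NR_SUBPAGES][PAGE_BYTES];
static char huge_page[NR_SUBPAGES][PAGE_BYTES];
static long nr_file_thps;       /* stands in for the filemap THP counter */

/* Like copy_highpage_mc(), returns nonzero if the copy hit (simulated)
 * poison instead of bringing the machine down. */
static int fake_copy_mc(char *dst, const char *src, bool poisoned)
{
	if (poisoned)
		return -1;
	memcpy(dst, src, PAGE_BYTES);
	return 0;
}

static bool collapse(int poisoned_index)
{
	int i;

	nr_file_thps++;         /* done before the copy loop, as in the patch */

	/* Step 1: copy only; no freeing, no stats, no xarray join yet. */
	for (i = 0; i < NR_SUBPAGES; i++) {
		if (fake_copy_mc(huge_page[i], old_pages[i],
				 i == poisoned_index)) {
			/* Roll back the optimistic increment (SCAN_COPY_MC case). */
			nr_file_thps--;
			return false;
		}
	}

	/*
	 * Step 2: every copy succeeded; only now free the old pages,
	 * update NR_FILE_THPS/NR_SHMEM_THPS and store the multi-index entry.
	 */
	return true;
}

int main(void)
{
	printf("clean collapse:  %s\n", collapse(-1) ? "ok" : "rolled back");
	printf("poisoned page 3: %s\n", collapse(3) ? "ok" : "rolled back");
	printf("nr_file_thps = %ld\n", nr_file_thps);
	return 0;
}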

Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
 mm/khugepaged.c | 77 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 29 deletions(-)

Patch

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 8e69a0640e551..08f93f2ea9712 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1710,7 +1710,7 @@  static void collapse_file(struct mm_struct *mm,
 {
 	struct address_space *mapping = file->f_mapping;
 	gfp_t gfp;
-	struct page *new_page;
+	struct page *new_page, *page, *tmp;
 	pgoff_t index, end = start + HPAGE_PMD_NR;
 	LIST_HEAD(pagelist);
 	XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1766,7 +1766,7 @@  static void collapse_file(struct mm_struct *mm,
 
 	xas_set(&xas, start);
 	for (index = start; index < end; index++) {
-		struct page *page = xas_next(&xas);
+		page = xas_next(&xas);
 
 		VM_BUG_ON(index != xas.xa_index);
 		if (is_shmem) {
@@ -1938,10 +1938,7 @@  static void collapse_file(struct mm_struct *mm,
 	}
 	nr = thp_nr_pages(new_page);
 
-	if (is_shmem)
-		__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
-	else {
-		__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
+	if (!is_shmem) {
 		filemap_nr_thps_inc(mapping);
 		/*
 		 * Paired with smp_mb() in do_dentry_open() to ensure
@@ -1952,40 +1949,44 @@  static void collapse_file(struct mm_struct *mm,
 		smp_mb();
 		if (inode_is_open_for_write(mapping->host)) {
 			result = SCAN_FAIL;
-			__mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
 			filemap_nr_thps_dec(mapping);
 			goto xa_locked;
 		}
 	}
 
-	if (nr_none) {
-		__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
-		if (is_shmem)
-			__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
-	}
-
-	/* Join all the small entries into a single multi-index entry */
-	xas_set_order(&xas, start, HPAGE_PMD_ORDER);
-	xas_store(&xas, new_page);
 xa_locked:
 	xas_unlock_irq(&xas);
 xa_unlocked:
 
 	if (result == SCAN_SUCCEED) {
-		struct page *page, *tmp;
-
 		/*
 		 * Replacing old pages with new one has succeeded, now we
-		 * need to copy the content and free the old pages.
+		 * attempt to copy the contents.
 		 */
 		index = start;
-		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+		list_for_each_entry(page, &pagelist, lru) {
 			while (index < page->index) {
 				clear_highpage(new_page + (index % HPAGE_PMD_NR));
 				index++;
 			}
-			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
-					page);
+			if (copy_highpage_mc(new_page + (page->index % HPAGE_PMD_NR), page)) {
+				result = SCAN_COPY_MC;
+				break;
+			}
+			index++;
+		}
+		while (result == SCAN_SUCCEED && index < end) {
+			clear_highpage(new_page + (index % HPAGE_PMD_NR));
+			index++;
+		}
+	}
+
+	if (result == SCAN_SUCCEED) {
+		/*
+		 * Copying old pages to the huge page has succeeded, now we
+		 * need to free the old pages.
+		 */
+		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
 			list_del(&page->lru);
 			page->mapping = NULL;
 			page_ref_unfreeze(page, 1);
@@ -1993,12 +1994,23 @@  static void collapse_file(struct mm_struct *mm,
 			ClearPageUnevictable(page);
 			unlock_page(page);
 			put_page(page);
-			index++;
 		}
-		while (index < end) {
-			clear_highpage(new_page + (index % HPAGE_PMD_NR));
-			index++;
+
+		xas_lock_irq(&xas);
+		if (is_shmem)
+			__mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
+		else
+			__mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
+
+		if (nr_none) {
+			__mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
+			if (is_shmem)
+				__mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
 		}
+		/* Join all the small entries into a single multi-index entry. */
+		xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+		xas_store(&xas, new_page);
+		xas_unlock_irq(&xas);
 
 		SetPageUptodate(new_page);
 		page_ref_add(new_page, HPAGE_PMD_NR - 1);
@@ -2014,9 +2026,9 @@  static void collapse_file(struct mm_struct *mm,
 
 		khugepaged_pages_collapsed++;
 	} else {
-		struct page *page;
-
-		/* Something went wrong: roll back page cache changes */
+		/*
+		 * Something went wrong: roll back page cache changes
+		 */
 		xas_lock_irq(&xas);
 		mapping->nrpages -= nr_none;
 
@@ -2049,6 +2061,13 @@  static void collapse_file(struct mm_struct *mm,
 			xas_lock_irq(&xas);
 		}
 		VM_BUG_ON(nr_none);
+		/*
+		 * Undo filemap_nr_thps_inc for non-SHMEM files; the increment
+		 * has only happened if the failure is due to SCAN_COPY_MC.
+		 */
+		if (!is_shmem && result == SCAN_COPY_MC)
+			filemap_nr_thps_dec(mapping);
+
 		xas_unlock_irq(&xas);
 
 		new_page->mapping = NULL;