@@ -1664,7 +1664,7 @@ static void collapse_file(struct mm_struct *mm,
{
struct address_space *mapping = file->f_mapping;
gfp_t gfp;
- struct page *new_page;
+ struct page *new_page, *page, *tmp;
pgoff_t index, end = start + HPAGE_PMD_NR;
LIST_HEAD(pagelist);
XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
@@ -1720,7 +1720,7 @@ static void collapse_file(struct mm_struct *mm,
xas_set(&xas, start);
for (index = start; index < end; index++) {
- struct page *page = xas_next(&xas);
+ page = xas_next(&xas);
VM_BUG_ON(index != xas.xa_index);
if (is_shmem) {
@@ -1892,10 +1892,7 @@ static void collapse_file(struct mm_struct *mm,
}
nr = thp_nr_pages(new_page);
- if (is_shmem)
- __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
- else {
- __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
+ if (!is_shmem) {
filemap_nr_thps_inc(mapping);
/*
* Paired with smp_mb() in do_dentry_open() to ensure
@@ -1906,21 +1903,10 @@ static void collapse_file(struct mm_struct *mm,
smp_mb();
if (inode_is_open_for_write(mapping->host)) {
result = SCAN_FAIL;
- __mod_lruvec_page_state(new_page, NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
goto xa_locked;
}
}
-
- if (nr_none) {
- __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
- /* nr_none is always 0 for non-shmem. */
- __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
- }
-
- /* Join all the small entries into a single multi-index entry */
- xas_set_order(&xas, start, HPAGE_PMD_ORDER);
- xas_store(&xas, new_page);
xa_locked:
xas_unlock_irq(&xas);
xa_unlocked:
@@ -1933,20 +1919,34 @@ static void collapse_file(struct mm_struct *mm,
try_to_unmap_flush();
if (result == SCAN_SUCCEED) {
- struct page *page, *tmp;
-
/*
* Replacing old pages with new one has succeeded, now we
- * need to copy the content and free the old pages.
+ * attempt to copy the contents.
*/
index = start;
- list_for_each_entry_safe(page, tmp, &pagelist, lru) {
+ list_for_each_entry(page, &pagelist, lru) {
while (index < page->index) {
clear_highpage(new_page + (index % HPAGE_PMD_NR));
index++;
}
- copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
- page);
+ if (copy_highpage_mc(new_page + (page->index % HPAGE_PMD_NR), page)) {
+ result = SCAN_COPY_MC;
+ break;
+ }
+ index++;
+ }
+ while (result == SCAN_SUCCEED && index < end) {
+ clear_highpage(new_page + (index % HPAGE_PMD_NR));
+ index++;
+ }
+ }
+
+ if (result == SCAN_SUCCEED) {
+ /*
+ * Copying old pages to the huge page has succeeded, now we
+ * need to free the old pages.
+ */
+ list_for_each_entry_safe(page, tmp, &pagelist, lru) {
list_del(&page->lru);
page->mapping = NULL;
page_ref_unfreeze(page, 1);
@@ -1954,12 +1954,23 @@ static void collapse_file(struct mm_struct *mm,
ClearPageUnevictable(page);
unlock_page(page);
put_page(page);
- index++;
}
- while (index < end) {
- clear_highpage(new_page + (index % HPAGE_PMD_NR));
- index++;
+
+ xas_lock_irq(&xas);
+ if (is_shmem)
+ __mod_lruvec_page_state(new_page, NR_SHMEM_THPS, nr);
+ else
+ __mod_lruvec_page_state(new_page, NR_FILE_THPS, nr);
+
+ if (nr_none) {
+ __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
+ /* nr_none is always 0 for non-shmem. */
+ __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
}
+ /* Join all the small entries into a single multi-index entry. */
+ xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+ xas_store(&xas, new_page);
+ xas_unlock_irq(&xas);
SetPageUptodate(new_page);
page_ref_add(new_page, HPAGE_PMD_NR - 1);
@@ -1975,8 +1986,6 @@ static void collapse_file(struct mm_struct *mm,
khugepaged_pages_collapsed++;
} else {
- struct page *page;
-
/* Something went wrong: roll back page cache changes */
xas_lock_irq(&xas);
if (nr_none) {
@@ -2010,6 +2019,13 @@ static void collapse_file(struct mm_struct *mm,
xas_lock_irq(&xas);
}
VM_BUG_ON(nr_none);
+ /*
+ * Undo the update of filemap_nr_thps_inc for non-SHMEM files only.
+ * This undo is needed only when the failure is due to SCAN_COPY_MC.
+ */
+ if (!is_shmem && result == SCAN_COPY_MC)
+ filemap_nr_thps_dec(mapping);
+
xas_unlock_irq(&xas);
new_page->mapping = NULL;

Make collapse_file roll back when copying pages failed. More concretely:

* extract copying operations into a separate loop
* postpone the updates for nr_none until both scanning and copying succeeded
* postpone joining small xarray entries until both scanning and copying succeeded
* postpone the update operations to NR_XXX_THPS
* for non-SHMEM file, roll back filemap_nr_thps_inc if scan succeeded but copying failed

Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
 mm/khugepaged.c | 74 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 29 deletions(-)
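Note for reviewers (not part of the patch): copy_highpage_mc(), which the new copy loop calls, is presumably introduced elsewhere in this series. Below is a minimal sketch of what such a machine-check-safe page copy could look like, assuming it wraps the existing copy_mc_to_kernel() helper with kmap_local_page(); the exact body and the -EHWPOISON return convention are illustrative assumptions, not taken from this patch.

#include <linux/highmem.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/*
 * Sketch only: a copy_highpage()-style helper that tolerates memory
 * errors (machine checks) on the source page. Assumes copy_mc_to_kernel()
 * returns the number of bytes left uncopied, i.e. 0 on success.
 */
static inline int copy_highpage_mc(struct page *to, struct page *from)
{
	unsigned long ret;
	char *vfrom, *vto;

	vto = kmap_local_page(to);
	vfrom = kmap_local_page(from);
	ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
	/* kunmap in reverse (LIFO) order of the kmap_local_page() calls. */
	kunmap_local(vfrom);
	kunmap_local(vto);

	/* Non-zero means the copy was aborted by a memory error. */
	return ret ? -EHWPOISON : 0;
}

With this convention, the "if (copy_highpage_mc(...))" test in the copy loop treats any non-zero value as a failed copy and switches result to SCAN_COPY_MC, which the rollback path in this patch then handles.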
Make collapse_file roll back when copying pages failed. More concretely: * extract copying operations into a separate loop * postpone the updates for nr_none until both scanning and copying succeeded * postpone joining small xarray entries until both scanning and copying succeeded * postpone the update operations to NR_XXX_THPS * for non-SHMEM file, roll back filemap_nr_thps_inc if scan succeeded but copying failed Signed-off-by: Jiaqi Yan <jiaqiyan@google.com> --- mm/khugepaged.c | 74 ++++++++++++++++++++++++++++++------------------- 1 file changed, 45 insertions(+), 29 deletions(-)