@@ -361,6 +361,25 @@ static inline void copy_highpage(struct page *to, struct page *from)
#endif
+/*
+ * Machine check exception handled version of copy_highpage().
+ * Returns true if copying the page content failed; otherwise false.
+ * Note that handling #MC requires arch opt-in.
+ */
+static inline bool copy_highpage_mc(struct page *to, struct page *from)
+{
+ char *vfrom, *vto;
+ unsigned long ret;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
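+	/* copy_mc_to_kernel() returns the number of bytes not copied */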
+ ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE);
+ kunmap_local(vto);
+ kunmap_local(vfrom);
+
+ return ret > 0;
+}
+
static inline void memcpy_page(struct page *dst_page, size_t dst_off,
struct page *src_page, size_t src_off,
size_t len)
@@ -36,7 +36,8 @@
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_TRUNCATED, "truncated") \
- EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EM( SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
+ EMe(SCAN_COPY_MC, "copy_poisoned_page") \
#undef EM
#undef EMe
@@ -55,6 +55,7 @@ enum scan_result {
SCAN_CGROUP_CHARGE_FAIL,
SCAN_TRUNCATED,
SCAN_PAGE_HAS_PRIVATE,
+ SCAN_COPY_MC,
};
#define CREATE_TRACE_POINTS
@@ -670,56 +671,125 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
return result;
}
-static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
- struct vm_area_struct *vma,
- unsigned long address,
- spinlock_t *ptl,
- struct list_head *compound_pagelist)
+/*
+ * __collapse_huge_page_copy - attempts to copy memory contents from normal
+ * pages to a hugepage. Cleans up the normal pages if copying succeeds;
+ * otherwise restores the original page table and releases isolated normal
+ * pages. Returns true if copying succeeds, otherwise false.
+ *
+ * @pte: starting PTE to copy from
+ * @page: the new hugepage to copy contents to
+ * @pmd: pointer to the new hugepage's PMD
+ * @rollback: the original normal pages' PMD
+ * @vma: the original normal pages' virtual memory area
+ * @address: starting address to copy
+ * @pte_ptl: lock on normal pages' PTEs
+ * @compound_pagelist: list that stores compound pages
+ */
+static bool __collapse_huge_page_copy(pte_t *pte,
+ struct page *page,
+ pmd_t *pmd,
+ pmd_t rollback,
+ struct vm_area_struct *vma,
+ unsigned long address,
+ spinlock_t *pte_ptl,
+ struct list_head *compound_pagelist)
{
struct page *src_page, *tmp;
pte_t *_pte;
- for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, page++, address += PAGE_SIZE) {
- pte_t pteval = *_pte;
+ pte_t pteval;
+ unsigned long _address;
+ spinlock_t *pmd_ptl;
+ bool copy_succeeded = true;
- if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
- clear_user_highpage(page, address);
- add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
- if (is_zero_pfn(pte_pfn(pteval))) {
+ /*
+ * Copying pages' contents is subject to memory poison at any iteration.
+ */
+ for (_pte = pte, _address = address;
+ _pte < pte + HPAGE_PMD_NR;
+ _pte++, page++, _address += PAGE_SIZE) {
+ pteval = *_pte;
+
+		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+			clear_user_highpage(page, _address);
+		} else {
+ src_page = pte_page(pteval);
+ if (copy_highpage_mc(page, src_page)) {
+ copy_succeeded = false;
+ break;
+ }
+ }
+ }
+
+ if (copy_succeeded) {
+ for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
+ _pte++, _address += PAGE_SIZE) {
+ pteval = *_pte;
+ if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
+ if (is_zero_pfn(pte_pfn(pteval))) {
+ /*
+ * pte_ptl mostly unnecessary.
+ */
+ spin_lock(pte_ptl);
+					ptep_clear(vma->vm_mm, _address, _pte);
+ spin_unlock(pte_ptl);
+ }
+ } else {
+ src_page = pte_page(pteval);
+ if (!PageCompound(src_page))
+ release_pte_page(src_page);
/*
- * ptl mostly unnecessary.
+ * pte_ptl mostly unnecessary, but preempt has to
+ * be disabled to update the per-cpu stats
+ * inside page_remove_rmap().
*/
- spin_lock(ptl);
- ptep_clear(vma->vm_mm, address, _pte);
- spin_unlock(ptl);
+ spin_lock(pte_ptl);
+ ptep_clear(vma->vm_mm, _address, _pte);
+ page_remove_rmap(src_page, vma, false);
+ spin_unlock(pte_ptl);
+ free_page_and_swap_cache(src_page);
}
- } else {
- src_page = pte_page(pteval);
- copy_user_highpage(page, src_page, address, vma);
- if (!PageCompound(src_page))
- release_pte_page(src_page);
- /*
- * ptl mostly unnecessary, but preempt has to
- * be disabled to update the per-cpu stats
- * inside page_remove_rmap().
- */
- spin_lock(ptl);
- ptep_clear(vma->vm_mm, address, _pte);
- page_remove_rmap(src_page, vma, false);
- spin_unlock(ptl);
- free_page_and_swap_cache(src_page);
+ }
+ list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
+ list_del(&src_page->lru);
+ mod_node_page_state(page_pgdat(src_page),
+ NR_ISOLATED_ANON + page_is_file_lru(src_page),
+ -compound_nr(src_page));
+ unlock_page(src_page);
+ free_swap_cache(src_page);
+ putback_lru_page(src_page);
+ }
+ } else {
+ /*
+ * Re-establish the regular PMD that points to the regular
+ * page table. Restoring PMD needs to be done prior to
+ * releasing pages. Since pages are still isolated and
+ * locked here, acquiring anon_vma_lock_write is unnecessary.
+ */
+ pmd_ptl = pmd_lock(vma->vm_mm, pmd);
+ pmd_populate(vma->vm_mm, pmd, pmd_pgtable(rollback));
+ spin_unlock(pmd_ptl);
+ /*
+ * Release both raw and compound pages isolated
+ * in __collapse_huge_page_isolate.
+ */
+ for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR;
+ _pte++, _address += PAGE_SIZE) {
+ pteval = *_pte;
+ if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) {
+ src_page = pte_page(pteval);
+ if (!PageCompound(src_page))
+ release_pte_page(src_page);
+ }
+ }
+ list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
+ list_del(&src_page->lru);
+ release_pte_page(src_page);
}
}
- list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
- list_del(&src_page->lru);
- mod_node_page_state(page_pgdat(src_page),
- NR_ISOLATED_ANON + page_is_file_lru(src_page),
- -compound_nr(src_page));
- unlock_page(src_page);
- free_swap_cache(src_page);
- putback_lru_page(src_page);
- }
+ return copy_succeeded;
}
static void khugepaged_alloc_sleep(void)
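
The rewritten __collapse_huge_page_copy() above follows a copy-before-commit
pattern: no source page is released and no PTE is torn down until every copy
has succeeded, so a machine check mid-copy can be rolled back simply by
re-pointing the PMD at the still-intact page table. A minimal userspace
sketch of the same pattern follows (not kernel code; all names here are
hypothetical):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a fallible copy such as copy_mc_to_kernel():
 * returns the number of bytes not copied; 0 means success. */
static size_t fallible_copy(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);	/* a real #MC-safe copy may stop short */
	return 0;		/* bytes not copied */
}

static bool collapse_chunks(char **slot, char *chunks[], size_t n,
			    size_t chunk_sz, char *staging)
{
	size_t i;

	/* Phase 1: stage every chunk; fail without touching old state. */
	for (i = 0; i < n; i++)
		if (fallible_copy(staging + i * chunk_sz, chunks[i], chunk_sz))
			return false;	/* rollback == keep the old chunks */

	/* Phase 2: commit the new buffer, then tear down the old state. */
	*slot = staging;
	for (i = 0; i < n; i++)
		free(chunks[i]);
	return true;
}

int main(void)
{
	enum { N = 4, SZ = 4096 };
	char *chunks[N], *merged = NULL;
	char *staging = malloc(N * SZ);
	size_t i;

	for (i = 0; i < N; i++) {
		chunks[i] = malloc(SZ);
		memset(chunks[i], 'a' + (int)i, SZ);
	}
	return collapse_chunks(&merged, chunks, N, SZ, staging) ? 0 : 1;
}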
@@ -981,6 +1051,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
int result = SCAN_FAIL;
struct vm_area_struct *vma;
struct mmu_notifier_range range;
+ bool copied = false;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
@@ -1084,9 +1155,13 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
*/
anon_vma_unlock_write(vma->anon_vma);
- __collapse_huge_page_copy(pte, hpage, vma, address, pte_ptl,
- &compound_pagelist);
+ copied = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
+ vma, address, pte_ptl, &compound_pagelist);
pte_unmap(pte);
+ if (!copied) {
+ result = SCAN_COPY_MC;
+ goto out_up_write;
+ }
/*
* spin_lock() below is not the equivalent of smp_wmb(), but
* the smp_wmb() inside __SetPageUptodate() can be reused to
Make __collapse_huge_page_copy return whether copying anonymous pages
succeeded, and make collapse_huge_page handle the return status.

Break the existing PTE scan loop into two for-loops. The first loop
copies source pages into the target huge page, and can fail gracefully
when running into memory errors in source pages. If copying all pages
succeeds, the second loop releases and clears up these normal pages.
Otherwise, the second loop rolls back the page table and page states by:
- re-establishing the original PTEs-to-PMD connection.
- releasing source pages back to their LRU list.

Tested manually:
0. Enable khugepaged on system under test.
1. Start a two-thread application. Each thread allocates a chunk of
   non-huge anonymous memory buffer.
2. Pick 4 random buffer locations (2 in each thread) and inject
   uncorrectable memory errors at corresponding physical addresses.
3. Signal both threads to make their memory buffer collapsible, i.e.
   by calling madvise(MADV_HUGEPAGE).
4. Wait and check kernel log: khugepaged is able to recover from
   poisoned pages and skips collapsing them.
5. Signal both threads to inspect their buffer contents and make sure
   there is no data corruption.

Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
---
 include/linux/highmem.h            |  19 ++++
 include/trace/events/huge_memory.h |   3 +-
 mm/khugepaged.c                    | 159 +++++++++++++++++++--------
 3 files changed, 138 insertions(+), 43 deletions(-)
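
For reference, a minimal sketch of the test program described in the
manual steps above. All names, sizes, and timings are illustrative;
step 2 (error injection) happens out of band through a platform
facility such as EINJ and appears only as a comment, and a location
that was actually poisoned would raise SIGBUS on access, which this
sketch does not handle.

/* build: cc -pthread poison_collapse_test.c (file name hypothetical) */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define BUF_SIZE (8UL << 20)	/* several PMD-sized regions */

static void *worker(void *arg)
{
	char pattern = 0x5a + (char)(long)arg;
	char *buf = mmap(NULL, BUF_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	size_t i;

	if (buf == MAP_FAILED)
		return NULL;

	/* step 1: populate a non-huge anonymous buffer */
	memset(buf, pattern, BUF_SIZE);

	/*
	 * step 2 happens out of band: inject uncorrectable memory
	 * errors at physical addresses backing this buffer.
	 */

	/* step 3: make the buffer collapsible */
	madvise(buf, BUF_SIZE, MADV_HUGEPAGE);

	/* step 4: give khugepaged time to attempt the collapse */
	sleep(60);

	/* step 5: verify no data corruption */
	for (i = 0; i < BUF_SIZE; i++)
		if (buf[i] != pattern)
			fprintf(stderr, "corruption at offset %zu\n", i);

	return NULL;
}

int main(void)
{
	pthread_t threads[2];
	long i;

	for (i = 0; i < 2; i++)
		pthread_create(&threads[i], NULL, worker, (void *)i);
	for (i = 0; i < 2; i++)
		pthread_join(threads[i], NULL);
	return 0;
}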