@@ -433,6 +433,9 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
page = ERR_PTR(ret);
goto out;
}
+ } else if (PageHWPoison(page)) {
+ page = ERR_PTR(-EHWPOISON);
+ goto out;
}
if (flags & FOLL_SPLIT && PageTransCompound(page)) {
@@ -540,8 +543,13 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
page = follow_huge_pd(vma, address,
__hugepd(pmd_val(pmdval)), flags,
PMD_SHIFT);
- if (page)
- return page;
+ if (page) {
+ struct page *p = check_page_hwpoison(page);
+
+ if (p == ERR_PTR(-EHWPOISON) && flags & FOLL_GET)
+ put_page(page);
+ return p;
+ }
return no_page_table(vma, flags);
}
retry:
@@ -643,7 +651,7 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
page = follow_huge_pud(mm, address, pud, flags);
if (page)
- return page;
+ return check_page_hwpoison(page);
return no_page_table(vma, flags);
}
if (is_hugepd(__hugepd(pud_val(*pud)))) {
@@ -652,6 +660,11 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma,
PUD_SHIFT);
- if (page)
- return page;
+ if (page) {
+ struct page *p = check_page_hwpoison(page);
+
+ if (p == ERR_PTR(-EHWPOISON) && flags & FOLL_GET)
+ put_page(page);
+ return p;
+ }
return no_page_table(vma, flags);
}
if (pud_devmap(*pud)) {
@@ -1087,10 +1102,14 @@ static long __get_user_pages(struct mm_struct *mm,
* struct page.
*/
goto next_page;
- } else if (IS_ERR(page)) {
+ } else if (PTR_ERR(page) == -EHWPOISON) {
+ ret = (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
+ goto out;
+ } else if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
+
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
@@ -1349,6 +1349,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct page *page = NULL;
+ struct page *tail = NULL;
assert_spin_locked(pmd_lockptr(mm, pmd));
@@ -1366,6 +1367,11 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
page = pmd_page(*pmd);
VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
+ tail = page + ((addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT);
+
+ if (PageHWPoison(tail))
+ return ERR_PTR(-EHWPOISON);
+
if (!try_grab_page(page, flags))
return ERR_PTR(-ENOMEM);
@@ -1405,11 +1411,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unlock_page(page);
}
skip_mlock:
- page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON_PAGE(!PageCompound(page) && !is_zone_device_page(page), page);
out:
- return page;
+ return tail;
}
/* NUMA hinting page fault entry point for trans huge pmds */
@@ -4958,7 +4958,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
likely(pages) ? pages + i : NULL,
vmas ? vmas + i : NULL);
- if (pages) {
+ /* The hwpoison page will be filtered out, so don't try to grab it */
+ if (pages && !PageHWPoison(page)) {
/*
* try_grab_compound_head() should always succeed here,
* because: a) we hold the ptl lock, and b) we've just
@@ -5581,6 +5582,11 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
pte = huge_ptep_get((pte_t *)pmd);
if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ /* if the page is hwpoisoned, don't grab it */
+ if (PageHWPoison(compound_head(page))) {
+ page = ERR_PTR(-EHWPOISON);
+ goto out;
+ }
/*
* try_grab_page() should always succeed here, because: a) we
* hold the pmd (ptl) lock, and b) we've just checked that the
@@ -97,6 +97,19 @@ static inline void set_page_refcounted(struct page *page)
set_page_count(page, 1);
}
+/*
+ * Check the hwpoison status of a page of any type; if it is poisoned,
+ * return ERR_PTR(-EHWPOISON).
+ */
+static inline struct page *check_page_hwpoison(struct page *page)
+{
+ if (PageHWPoison(page))
+ return ERR_PTR(-EHWPOISON);
+ else if (PageHuge(page) && PageHWPoison(compound_head(page)))
+ return ERR_PTR(-EHWPOISON);
+
+ return page;
+}
+
extern unsigned long highest_memmap_pfn;
/*
When we call get_user_pages() to pin user pages in memory, there may be hwpoison pages among them. Currently we only handle the normal case, where the memory recovery job has finished correctly: in that case we do not return the hwpoison page to callers. But in other cases — for example, when memory recovery fails and the related user-process pte is not correctly invalidated — we still return the hwpoison page, and callers may touch it and trigger a panic. In gup.c, for a normal page, after calling follow_page_mask() we return the related page pointer; in the other hwpoison case, with an invalid pte, it returns NULL, which is handled by the if (!page) branch. With this patch we filter out hwpoison pages in follow_page_mask() and return an error code for the recovery-failure cases. We check the page's hwpoison status as early as possible, skip the subsequent normal processing, and try not to grab the related pages. Signed-off-by: Aili Yao <yaoaili@kingsoft.com> Cc: David Hildenbrand <david@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Naoya Horiguchi <naoya.horiguchi@nec.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Mike Kravetz <mike.kravetz@oracle.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: stable@vger.kernel.org --- mm/gup.c | 27 +++++++++++++++++++++++---- mm/huge_memory.c | 9 +++++++-- mm/hugetlb.c | 8 +++++++- mm/internal.h | 13 +++++++++++++ 4 files changed, 50 insertions(+), 7 deletions(-)