@@ -1257,19 +1257,28 @@ static void kill_me_maybe(struct callback_head *cb)
{
struct task_struct *p = container_of(cb, struct task_struct, mce_kill_me);
int flags = MF_ACTION_REQUIRED;
+ int ret;
pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
if (!p->mce_ripv)
flags |= MF_MUST_KILL;
- if (!memory_failure(p->mce_addr >> PAGE_SHIFT, flags) &&
- !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
+ ret = memory_failure(p->mce_addr >> PAGE_SHIFT, flags);
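+	/*
+	 * On success, mark the poisoned page not-present in the kernel
+	 * page tables to prevent further (speculative) accesses, unless
+	 * the poison was consumed by a kernel copy-from-user: that case
+	 * falls through and sends SIGBUS using p->mce_vaddr below.
+	 */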
+ if (!ret && !(p->mce_kflags & MCE_IN_KERNEL_COPYIN)) {
set_mce_nospec(p->mce_addr >> PAGE_SHIFT, p->mce_whole_page);
sync_core();
return;
}
+	/*
+	 * -EHWPOISON from memory_failure() means that it already sent SIGBUS
+	 * to the current process with the proper error info, so there is no
+	 * need to send it again here.
+	 */
+ if (ret == -EHWPOISON)
+ return;
+
if (p->mce_vaddr != (void __user *)-1l) {
force_sig_mceerr(BUS_MCEERR_AR, p->mce_vaddr, PAGE_SHIFT);
} else {
@@ -323,6 +323,11 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
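+/*
+ * A hwpoison swap entry is created via make_hwpoison_entry() with the
+ * poisoned page's pfn as its offset, so the pfn can be read straight
+ * back out of the entry.
+ */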
+static inline unsigned long hwpoison_entry_to_pfn(swp_entry_t entry)
+{
+ return swp_offset(entry);
+}
+
static inline void num_poisoned_pages_inc(void)
{
atomic_long_inc(&num_poisoned_pages);
@@ -56,6 +56,7 @@
#include <linux/kfifo.h>
#include <linux/ratelimit.h>
#include <linux/page-isolation.h>
+#include <linux/pagewalk.h>
#include "internal.h"
#include "ras/ras_event.h"
@@ -554,6 +555,142 @@ static void collect_procs(struct page *page, struct list_head *tokill,
collect_procs_file(page, tokill, force_early);
}
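+/*
+ * Private state for walking a task's page table to locate a poisoned
+ * pfn: @tk collects the virtual address and mapping size of the first
+ * mapping found.
+ */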
+struct hwp_walk {
+ struct to_kill tk;
+ unsigned long pfn;
+ int flags;
+};
+
+static int set_to_kill(struct to_kill *tk, unsigned long addr, short shift)
+{
+ /* Abort pagewalk when finding multiple mappings to the error page. */
+ if (tk->addr)
+ return 1;
+ tk->addr = addr;
+ tk->size_shift = shift;
+ return 0;
+}
+
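+/*
+ * Check whether @pte maps @poisoned_pfn, either as a present mapping or
+ * as a hwpoison swap entry left behind by a previous unmap, and record
+ * the virtual address in @tk if it does.
+ */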
+static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
+ unsigned long poisoned_pfn, struct to_kill *tk)
+{
+	unsigned long pfn = 0;
+
+ if (pte_present(pte)) {
+ pfn = pte_pfn(pte);
+ } else {
+ swp_entry_t swp = pte_to_swp_entry(pte);
+
+ if (is_hwpoison_entry(swp))
+ pfn = hwpoison_entry_to_pfn(swp);
+ }
+
+ if (!pfn || pfn != poisoned_pfn)
+ return 0;
+
+ return set_to_kill(tk, addr, shift);
+}
+
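+/*
+ * Check one pmd range: a huge pmd is checked as a whole, computing the
+ * address of the poisoned subpage; otherwise every pte in the range is
+ * checked individually.
+ */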
+static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, struct mm_walk *walk)
+{
+ struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
+	int ret = 0;
+	pte_t *ptep, *mapped_pte;
+ spinlock_t *ptl;
+
+ ptl = pmd_trans_huge_lock(pmdp, walk->vma);
+ if (ptl) {
+ pmd_t pmd = *pmdp;
+
+ if (pmd_present(pmd)) {
+ unsigned long pfn = pmd_pfn(pmd);
+
+			if (pfn <= hwp->pfn &&
+			    hwp->pfn < pfn + (PMD_SIZE >> PAGE_SHIFT)) {
+				unsigned long hwpoison_vaddr = addr +
+					((hwp->pfn - pfn) << PAGE_SHIFT);
+
+ ret = set_to_kill(&hwp->tk, hwpoison_vaddr,
+ PAGE_SHIFT);
+ }
+ }
+ spin_unlock(ptl);
+ goto out;
+ }
+
+ if (pmd_trans_unstable(pmdp))
+ goto out;
+
+	mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp,
+						addr, &ptl);
+ for (; addr != end; ptep++, addr += PAGE_SIZE) {
+ ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT,
+ hwp->pfn, &hwp->tk);
+ if (ret == 1)
+ break;
+ }
+	pte_unmap_unlock(mapped_pte, ptl);
+out:
+ cond_resched();
+ return ret;
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
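+/* A hugetlb page is mapped by a single huge pte; check it directly. */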
+static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
+ unsigned long addr, unsigned long end,
+ struct mm_walk *walk)
+{
+ struct hwp_walk *hwp = (struct hwp_walk *)walk->private;
+ pte_t pte = huge_ptep_get(ptep);
+ struct hstate *h = hstate_vma(walk->vma);
+
+ return check_hwpoisoned_entry(pte, addr, huge_page_shift(h),
+ hwp->pfn, &hwp->tk);
+}
+#else
+#define hwpoison_hugetlb_range NULL
+#endif
+
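+/* No pte_entry: hwpoison_pte_range() checks individual ptes itself. */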
+static const struct mm_walk_ops hwp_walk_ops = {
+ .pmd_entry = hwpoison_pte_range,
+ .hugetlb_entry = hwpoison_hugetlb_range,
+};
+
+/*
+ * Send SIGBUS to the current process with the error info.
+ *
+ * This function is intended to handle "Action Required" MCEs on already
+ * hardware poisoned pages. They could happen, for example, when
+ * memory_failure() failed to unmap the error page at the first call, or
+ * when multiple Action Optional MCE events race on different CPUs with
+ * Local MCE enabled.
+ *
+ * The MCE handler currently has no easy access to the error virtual
+ * address, so this function walks the page table to find it. One
+ * challenge is to reliably report the proper virtual address of the
+ * error via SIGBUS: a process can map the same page at multiple virtual
+ * addresses, and then there is no way to tell which of them was being
+ * accessed when the Action Required MCE was generated. In that corner
+ * case, give up and fall back to sending SIGBUS with no error info.
+ */
+static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
+ int flags)
+{
+ int ret;
+ struct hwp_walk priv = {
+ .pfn = pfn,
+ };
+ priv.tk.tsk = p;
+
+ mmap_read_lock(p->mm);
+ ret = walk_page_range(p->mm, 0, TASK_SIZE_MAX, &hwp_walk_ops,
+ (void *)&priv);
+ if (!ret && priv.tk.addr)
+ kill_proc(&priv.tk, pfn, flags);
+ mmap_read_unlock(p->mm);
+ return ret ? -EFAULT : -EHWPOISON;
+}
+
static const char *action_name[] = {
[MF_IGNORED] = "Ignored",
[MF_FAILED] = "Failed",
@@ -1228,7 +1365,10 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
if (TestSetPageHWPoison(head)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
- return -EHWPOISON;
+ res = -EHWPOISON;
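+		/*
+		 * Pass the head pfn, since that is what the huge pte in
+		 * the page table maps.
+		 */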
+ if (flags & MF_ACTION_REQUIRED)
+ res = kill_accessing_process(current, page_to_pfn(head), flags);
+ return res;
}
num_poisoned_pages_inc();
@@ -1437,8 +1577,11 @@ int memory_failure(unsigned long pfn, int flags)
if (TestSetPageHWPoison(p)) {
pr_err("Memory failure: %#lx: already hardware poisoned\n",
pfn);
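+		/*
+		 * The page was already poisoned by an earlier event. For
+		 * an Action Required MCE, the accessing process still
+		 * needs a SIGBUS with the virtual address, so try to find
+		 * it and deliver the signal from here.
+		 */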
+ res = -EHWPOISON;
+ if (flags & MF_ACTION_REQUIRED)
+ res = kill_accessing_process(current, pfn, flags);
mutex_unlock(&mf_mutex);
- return -EHWPOISON;
+ return res;
}
orig_head = hpage = compound_head(p);