@@ -329,8 +329,8 @@ static inline void copy_highpage(struct page *to, struct page *from)
/*
* If architecture supports machine check exception handling, define the
* #MC versions of copy_user_highpage and copy_highpage. They copy a memory
- * page with #MC in source page (@from) handled, and return the number
- * of bytes not copied if there was a #MC, otherwise 0 for success.
+ * page with #MC in source page (@from) handled, and return -EFAULT if there
+ * was a #MC, otherwise 0 for success.
*/
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
@@ -349,7 +349,7 @@ static inline int copy_mc_user_highpage(struct page *to, struct page *from,
if (ret)
memory_failure_queue(page_to_pfn(from), 0);
- return ret;
+ return ret ? -EFAULT : 0;
}
static inline int copy_mc_highpage(struct page *to, struct page *from)
@@ -368,7 +368,7 @@ static inline int copy_mc_highpage(struct page *to, struct page *from)
if (ret)
memory_failure_queue(page_to_pfn(from), 0);
- return ret;
+ return ret ? -EFAULT : 0;
}
#else
static inline int copy_mc_user_highpage(struct page *to, struct page *from,
@@ -820,7 +820,7 @@ static int __collapse_huge_page_copy(pte_t *pte, struct folio *folio,
continue;
}
src_page = pte_page(pteval);
- if (copy_mc_user_highpage(page, src_page, src_addr, vma) > 0) {
+ if (copy_mc_user_highpage(page, src_page, src_addr, vma)) {
result = SCAN_COPY_MC;
break;
}
@@ -2081,7 +2081,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
}
for (i = 0; i < nr_pages; i++) {
- if (copy_mc_highpage(dst, folio_page(folio, i)) > 0) {
+ if (copy_mc_highpage(dst, folio_page(folio, i))) {
result = SCAN_COPY_MC;
goto rollback;
}
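
For illustration only, not part of the patch: a minimal sketch of a hypothetical caller under the new return convention. Since copy_mc_user_highpage() now returns 0 on success or -EFAULT when a #MC was consumed in the source page (rather than a count of uncopied bytes), callers check for any non-zero value instead of "> 0". The helper name copy_one_page() below is invented for the example.

/*
 * Hypothetical caller sketch (not from the patch): treat any non-zero
 * return from copy_mc_user_highpage() as "source page is poisoned",
 * and propagate the -EFAULT it now reports.
 */
static int copy_one_page(struct page *dst, struct page *src,
			 unsigned long addr, struct vm_area_struct *vma)
{
	int ret = copy_mc_user_highpage(dst, src, addr, vma);

	if (ret)
		return ret;	/* -EFAULT: a #MC was hit in @src */
	return 0;
}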