@@ -3969,18 +3969,32 @@ EXPORT_SYMBOL(might_fault);
 #endif
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+
+#ifndef ARCH_HAS_USER_NOCACHE
+#define ARCH_HAS_USER_NOCACHE 0
+#endif
+
+#if ARCH_HAS_USER_NOCACHE == 0
+#define clear_user_highpage_nocache clear_user_highpage
+#endif
+
 static void clear_gigantic_page(struct page *page,
-				unsigned long addr,
-				unsigned int pages_per_huge_page)
+				unsigned long haddr, unsigned long fault_address,
+				unsigned int pages_per_huge_page)
 {
 	int i;
 	struct page *p = page;
+	unsigned long vaddr;
+	int target = (fault_address - haddr) >> PAGE_SHIFT;
 
 	might_sleep();
-	for (i = 0; i < pages_per_huge_page;
-	     i++, p = mem_map_next(p, page, i)) {
+	for (i = 0, vaddr = haddr; i < pages_per_huge_page;
+	     i++, p = mem_map_next(p, page, i), vaddr += PAGE_SIZE) {
 		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
+		if (!ARCH_HAS_USER_NOCACHE || i == target)
+			clear_user_highpage(p, vaddr);
+		else
+			clear_user_highpage_nocache(p, vaddr);
 	}
 }
 void clear_huge_page(struct page *page,
@@ -3988,16 +4002,23 @@ void clear_huge_page(struct page *page,
 		     unsigned int pages_per_huge_page)
 {
 	int i;
+	unsigned long vaddr;
+	int target = (fault_address - haddr) >> PAGE_SHIFT;
 
 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, haddr, pages_per_huge_page);
+		clear_gigantic_page(page, haddr, fault_address,
+				    pages_per_huge_page);
 		return;
 	}
 
 	might_sleep();
-	for (i = 0; i < pages_per_huge_page; i++) {
+	for (i = 0, vaddr = haddr; i < pages_per_huge_page;
+	     i++, page++, vaddr += PAGE_SIZE) {
 		cond_resched();
-		clear_user_highpage(page + i, haddr + i * PAGE_SIZE);
+		if (!ARCH_HAS_USER_NOCACHE || i == target)
+			clear_user_highpage(page, vaddr);
+		else
+			clear_user_highpage_nocache(page, vaddr);
 	}
 }
 