@@ -3969,18 +3969,35 @@ EXPORT_SYMBOL(might_fault);
 #endif
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
+
+#ifndef ARCH_HAS_USER_NOCACHE
+#define ARCH_HAS_USER_NOCACHE 0
+#endif
+
+#if ARCH_HAS_USER_NOCACHE == 0
+#define clear_user_highpage_nocache clear_user_highpage
+#endif
+
 static void clear_gigantic_page(struct page *page,
 				unsigned long addr,
 				unsigned int pages_per_huge_page)
 {
 	int i;
 	struct page *p = page;
+	unsigned long vaddr;
+	unsigned long haddr = addr & ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+	int target = (addr - haddr) >> PAGE_SHIFT;
 	might_sleep();
+	/* Clear the faulting subpage with cache, all others non-temporally. */
 	for (i = 0; i < pages_per_huge_page;
 	     i++, p = mem_map_next(p, page, i)) {
 		cond_resched();
-		clear_user_highpage(p, addr + i * PAGE_SIZE);
+		vaddr = haddr + i * PAGE_SIZE;
+		if (!ARCH_HAS_USER_NOCACHE || i == target)
+			clear_user_highpage(p, vaddr);
+		else
+			clear_user_highpage_nocache(p, vaddr);
 	}
 }
 void clear_huge_page(struct page *page,
@@ -3988,16 +4005,23 @@ void clear_huge_page(struct page *page,
 {
 	int i;
 	unsigned long haddr = addr & HPAGE_PMD_MASK;
+	unsigned long vaddr;
+	int target = (addr - haddr) >> PAGE_SHIFT;
 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-		clear_gigantic_page(page, haddr, pages_per_huge_page);
+		clear_gigantic_page(page, addr, pages_per_huge_page);
 		return;
 	}
 	might_sleep();
+	/* Clear the faulting subpage with cache, all others non-temporally. */
 	for (i = 0; i < pages_per_huge_page; i++) {
 		cond_resched();
-		clear_user_highpage(page + i, haddr + i * PAGE_SIZE);
+		vaddr = haddr + i * PAGE_SIZE;
+		if (!ARCH_HAS_USER_NOCACHE || i == target)
+			clear_user_highpage(page + i, vaddr);
+		else
+			clear_user_highpage_nocache(page + i, vaddr);
 	}
 }