@@ -44,14 +44,27 @@ void clear_page_orig(void *page);
void clear_page_rep(void *page);
void clear_page_erms(void *page);
+/* This header is used from assembly code, so avoid pulling in all of kmsan.h; declare only what we need. */
+#ifdef CONFIG_KMSAN
+void kmsan_unpoison_shadow(const void *addr, size_t size);
+#endif
+__no_sanitize_memory
static inline void clear_page(void *page)
{
+#ifdef CONFIG_KMSAN
+ /* alternative_call_2() modifies |page|, so keep a copy for the KMSAN call below. */
+ void *page_copy = page;
+#endif
alternative_call_2(clear_page_orig,
clear_page_rep, X86_FEATURE_REP_GOOD,
clear_page_erms, X86_FEATURE_ERMS,
"=D" (page),
"0" (page)
: "cc", "memory", "rax", "rcx");
+#ifdef CONFIG_KMSAN
+ /* Clear the KMSAN shadow for this page, if it has one. */
+ kmsan_unpoison_shadow(page_copy, PAGE_SIZE);
+#endif
}
void copy_page(void *to, void *from);
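A note on kmsan_unpoison_shadow(), since it is only declared here: it is the KMSAN entry point that marks a memory range as fully initialized. As a mental model only — kmsan_get_shadow_address() below is a hypothetical helper, and the real runtime resolves shadow through its metadata pages — the call reduces to zeroing the shadow bytes that mirror the range:

  /*
   * Illustrative sketch only: assuming one shadow byte per data byte,
   * with zero meaning "initialized", unpoisoning is just a memset().
   * kmsan_get_shadow_address() is a hypothetical helper.
   */
  void kmsan_unpoison_shadow(const void *addr, size_t size)
  {
          void *shadow = kmsan_get_shadow_address(addr);

          if (shadow)
                  memset(shadow, 0, size);
  }

After clear_page() the page contents are all zeroes, so marking the whole PAGE_SIZE range initialized is the correct bookkeeping.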
@@ -7,6 +7,7 @@
* (C) Copyright 1995 1996 Linus Torvalds
*/
+#include <linux/kmsan.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
@@ -469,6 +470,8 @@ void iounmap(volatile void __iomem *addr)
return;
}
+ kmsan_iounmap_page_range((unsigned long)addr,
+ (unsigned long)addr + get_vm_area_size(p));
memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));
/* Finally remove it */
@@ -5,6 +5,7 @@
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/bug.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
@@ -255,6 +256,7 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
vfrom = kmap_atomic(from);
vto = kmap_atomic(to);
copy_user_page(vto, vfrom, vaddr, to);
+ kmsan_unpoison_shadow(page_address(to), PAGE_SIZE);
kunmap_atomic(vto);
kunmap_atomic(vfrom);
}
@@ -270,6 +272,7 @@ static inline void copy_highpage(struct page *to, struct page *from)
vfrom = kmap_atomic(from);
vto = kmap_atomic(to);
copy_page(vto, vfrom);
+ kmsan_copy_page_meta(to, from);
kunmap_atomic(vto);
kunmap_atomic(vfrom);
}
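For context, the two hunks above are deliberately different. copy_user_highpage() fills the destination via copy_user_page(), typically uninstrumented assembly operating on user-supplied data, so the destination is simply marked initialized with kmsan_unpoison_shadow(). copy_highpage() is a verbatim kernel-to-kernel copy, so the destination must inherit the source's metadata instead — otherwise copying a partially uninitialized page would launder it. A minimal sketch of kmsan_copy_page_meta(), with hypothetical shadow_page()/origin_page() accessors:

  /* Sketch only; shadow_page()/origin_page() are hypothetical accessors. */
  void kmsan_copy_page_meta(struct page *dst, struct page *src)
  {
          if (!shadow_page(dst) || !shadow_page(src))
                  return;
          copy_page(page_address(shadow_page(dst)),
                    page_address(shadow_page(src)));
          copy_page(page_address(origin_page(dst)),
                    page_address(origin_page(src)));
  }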
@@ -6,6 +6,7 @@
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
+#include <linux/kmsan.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/sched.h>
@@ -214,6 +215,8 @@ int ioremap_page_range(unsigned long addr,
unsigned long start;
unsigned long next;
int err;
+ unsigned long old_addr = addr;
+ phys_addr_t old_phys_addr = phys_addr;
might_sleep();
BUG_ON(addr >= end);
@@ -228,6 +231,8 @@ int ioremap_page_range(unsigned long addr,
} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
flush_cache_vmap(start, end);
+ if (!err)
+ kmsan_ioremap_page_range(old_addr, end, old_phys_addr, prot);
return err;
}
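Aside: old_addr and old_phys_addr are saved up front because the do/while loop advances both addr and phys_addr as it walks the range, and the hook needs the original values. Conceptually, kmsan_ioremap_page_range() backs the freshly mapped I/O range with zeroed metadata pages, so reads from the mapping are treated as initialized. A simplified sketch that ignores error handling and reentrancy, reusing the same hypothetical kmsan_shadow_addr()/kmsan_origin_addr() helpers:

  /* Sketch only: give [start, end) zeroed shadow/origin pages. */
  void kmsan_ioremap_page_range(unsigned long start, unsigned long end,
                                phys_addr_t phys_addr, pgprot_t prot)
  {
          unsigned long addr;

          for (addr = start; addr < end; addr += PAGE_SIZE) {
                  struct page *shadow = alloc_page(GFP_KERNEL | __GFP_ZERO);
                  struct page *origin = alloc_page(GFP_KERNEL | __GFP_ZERO);

                  /* phys_addr is unused in this simplified model. */
                  __vmap_page_range_noflush(kmsan_shadow_addr(addr),
                                            kmsan_shadow_addr(addr) + PAGE_SIZE,
                                            prot, &shadow);
                  __vmap_page_range_noflush(kmsan_origin_addr(addr),
                                            kmsan_origin_addr(addr) + PAGE_SIZE,
                                            prot, &origin);
          }
  }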
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/spinlock.h>
+#include <linux/kmsan.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
@@ -2710,6 +2711,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
gup_fast_permitted(start, end)) {
local_irq_save(flags);
gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
+ kmsan_gup_pgd_range(pages, nr_pinned);
local_irq_restore(flags);
}
@@ -2765,6 +2767,7 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages,
gup_fast_permitted(start, end)) {
local_irq_disable();
gup_pgd_range(addr, end, gup_flags, pages, &nr_pinned);
+ kmsan_gup_pgd_range(pages, nr_pinned);
local_irq_enable();
ret = nr_pinned;
}
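Aside: pages pinned by get_user_pages_fast() may be written by usercopy routines or by hardware outside the compiler's view, so KMSAN has to consider their contents initialized. A minimal sketch of the hook, assuming lowmem pages (kmap is skipped for brevity):

  /* Sketch only: mark the contents of each pinned page initialized. */
  void kmsan_gup_pgd_range(struct page **pages, int nr)
  {
          int i;

          for (i = 0; i < nr; i++)
                  kmsan_unpoison_shadow(page_address(pages[i]), PAGE_SIZE);
  }

The call sits inside the irq-disabled section so that it covers exactly the nr_pinned pages gup_pgd_range() just pinned.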
@@ -51,6 +51,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/memremap.h>
+#include <linux/kmsan.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/export.h>
@@ -2676,6 +2677,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
put_page(old_page);
return 0;
}
+ kmsan_copy_page_meta(new_page, old_page);
}
if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
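A note on the COW path: the new page has just been filled from old_page, so copying the metadata keeps the initialized-ness of the original mapping instead of treating the copy as fully fresh. When CONFIG_KMSAN is disabled, the hooks are expected to follow the usual stub pattern so call sites need no #ifdefs (assumed shape, not shown in this patch):

  #ifndef CONFIG_KMSAN
  static inline void kmsan_copy_page_meta(struct page *dst, struct page *src)
  {
  }
  #endif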
@@ -26,6 +26,8 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
+#include <linux/kmsan.h>
+#include <linux/kmsan-checks.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -1178,6 +1180,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
VM_BUG_ON_PAGE(PageTail(page), page);
trace_mm_page_free(page, order);
+ kmsan_free_page(page, order);
/*
* Check tail pages before head page information is cleared to
@@ -3199,6 +3202,7 @@ void split_page(struct page *page, unsigned int order)
VM_BUG_ON_PAGE(PageCompound(page), page);
VM_BUG_ON_PAGE(!page_count(page), page);
+ kmsan_split_page(page, order);
for (i = 1; i < (1 << order); i++)
set_page_refcounted(page + i);
split_page_owner(page, order);
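Aside: split_page() turns one order-N allocation into 2^N independent order-0 pages, and the metadata must be split the same way so that each resulting page can later be freed on its own. A sketch of the assumed semantics, again with hypothetical shadow_page()/origin_page() accessors, and ignoring the reentrancy care a real implementation needs (kmsan_split_page() is itself reached from split_page()):

  /* Sketch only: split the metadata pages alongside the data pages. */
  void kmsan_split_page(struct page *page, unsigned int order)
  {
          if (!shadow_page(page))
                  return;
          split_page(shadow_page(page), order);
          split_page(origin_page(page), order);
  }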
@@ -3349,6 +3353,14 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
/*
* Allocate a page from the given zone. Use pcplists for order-0 allocations.
*/
+
+/*
+ * Do not instrument rmqueue() with KMSAN. This function may call
+ * __msan_poison_alloca() through a call to set_pfnblock_flags_mask().
+ * If __msan_poison_alloca() attempts to allocate pages for the stack depot, it
+ * may call rmqueue() again, which will result in a deadlock.
+ */
+__no_sanitize_memory
static inline
struct page *rmqueue(struct zone *preferred_zone,
struct zone *zone, unsigned int order,
@@ -4862,6 +4874,10 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
+ if (page && kmsan_alloc_page(page, order, gfp_mask)) {
+ __free_pages(page, order);
+ page = NULL;
+ }
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
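A note on the allocation hook: unlike the other hooks, kmsan_alloc_page() can fail, because it has to allocate shadow and origin pages of the same order. The assumed contract, matching the call site above:

  /*
   * Assumed contract (sketch): allocate metadata for |page| and
   * poison or unpoison its contents depending on gfp_mask (e.g.
   * __GFP_ZERO means initialized). Returns 0 on success; on failure
   * the caller must free |page| and fail the allocation.
   */
  int kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);

kmsan_free_page() in free_pages_prepare() is the matching release point for that metadata.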
@@ -29,6 +29,7 @@
#include <linux/rcupdate.h>
#include <linux/pfn.h>
#include <linux/kmemleak.h>
+#include <linux/kmsan.h>
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/llist.h>
@@ -127,7 +128,8 @@ static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (p4d++, addr = next, addr != end);
}
-static void vunmap_page_range(unsigned long addr, unsigned long end)
+/* Exported for KMSAN; declared only in mm/kmsan/kmsan.h. */
+void __vunmap_page_range(unsigned long addr, unsigned long end)
{
pgd_t *pgd;
unsigned long next;
@@ -141,6 +143,13 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
vunmap_p4d_range(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
+EXPORT_SYMBOL(__vunmap_page_range);
+
+static void vunmap_page_range(unsigned long addr, unsigned long end)
+{
+ kmsan_vunmap_page_range(addr, end);
+ __vunmap_page_range(addr, end);
+}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, pgprot_t prot, struct page **pages, int *nr)
@@ -224,8 +233,11 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
* will have pfns corresponding to the "pages" array.
*
* Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
+ *
+ * This function is exported for use in KMSAN, but is only declared in KMSAN
+ * headers.
*/
-static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+int __vmap_page_range_noflush(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages)
{
pgd_t *pgd;
@@ -245,6 +257,14 @@ static int vmap_page_range_noflush(unsigned long start, unsigned long end,
return nr;
}
+EXPORT_SYMBOL(__vmap_page_range_noflush);
+
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+ pgprot_t prot, struct page **pages)
+{
+ kmsan_vmap_page_range_noflush(start, end, prot, pages);
+ return __vmap_page_range_noflush(start, end, prot, pages);
+}
static int vmap_page_range(unsigned long start, unsigned long end,
pgprot_t prot, struct page **pages)
Insert KMSAN hooks that make the necessary bookkeeping changes: - allocate/split/deallocate metadata pages in alloc_pages()/split_page()/free_page(); - clear page shadow and origins in clear_page(), copy_user_highpage(); - copy page metadata in copy_highpage(), wp_page_copy(); - handle vmap()/vunmap()/iounmap(); Signed-off-by: Alexander Potapenko <glider@google.com> To: Alexander Potapenko <glider@google.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Eric Dumazet <edumazet@google.com> Cc: Wolfram Sang <wsa@the-dreams.de> Cc: Petr Mladek <pmladek@suse.com> Cc: Vegard Nossum <vegard.nossum@oracle.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Marco Elver <elver@google.com> Cc: Andrey Konovalov <andreyknvl@google.com> Cc: linux-mm@kvack.org --- This patch was previously called "kmsan: call KMSAN hooks where needed" v2: - dropped call to kmsan_handle_vprintk, updated comment in printk.c v3: - put KMSAN_INIT_VALUE on a separate line in vprintk_store() - dropped call to kmsan_handle_i2c_transfer() - minor style fixes v4: - split mm-unrelated bits to other patches as requested by Andrey Konovalov - dropped changes to mm/compaction.c - use kmsan_unpoison_shadow in page_64.h and highmem.h Change-Id: I1250a928d9263bf71fdaa067a070bdee686ef47b --- arch/x86/include/asm/page_64.h | 13 +++++++++++++ arch/x86/mm/ioremap.c | 3 +++ include/linux/highmem.h | 3 +++ lib/ioremap.c | 5 +++++ mm/gup.c | 3 +++ mm/memory.c | 2 ++ mm/page_alloc.c | 17 +++++++++++++++++ mm/vmalloc.c | 24 ++++++++++++++++++++++-- 8 files changed, 68 insertions(+), 2 deletions(-)