@@ -320,7 +320,17 @@ static void dump_pagetable(unsigned long address)
void vmalloc_sync_all(void)
{
+#ifdef CONFIG_KMSAN
+ /*
+ * For KMSAN, make sure metadata pages for vmalloc area and modules are
+ * also synced.
+ */
+ sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_META_END);
+ sync_global_pgds(MODULES_SHADOW_START & PGDIR_MASK,
+ MODULES_ORIGIN_END);
+#else
sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
+#endif
}
/*
@@ -337,7 +347,17 @@ static noinline int vmalloc_fault(unsigned long address)
pte_t *pte;
/* Make sure we are in vmalloc area: */
+#ifdef CONFIG_KMSAN
+ /*
+ * For KMSAN, faults on the metadata (shadow/origin) ranges for the
+ * vmalloc area and modules must be handled here as well.
+ */
+ if (!(address >= VMALLOC_START && address < VMALLOC_META_END) &&
+ !(address >= MODULES_SHADOW_START &&
+ address < MODULES_ORIGIN_END))
+#else
if (!(address >= VMALLOC_START && address < VMALLOC_END))
+#endif
return -1;
/*
KMSAN assumes shadow and origin pages for every allocated page are
accessible. For pages in the vmalloc region those metadata pages reside
in [VMALLOC_END, VMALLOC_META_END), therefore we must sync a bigger
memory region.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: linux-mm@kvack.org
---
Change-Id: I0d54855489870ef1180b37fe2120b601da464bf7
---
 arch/x86/mm/fault.c | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)