@@ -478,7 +478,7 @@ export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN CFLAGS_KMSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -898,6 +898,7 @@ KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
include scripts/Makefile.kasan
+include scripts/Makefile.kmsan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
@@ -12,6 +12,7 @@
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_KASAN
+#undef CONFIG_KMSAN
/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5
@@ -35,6 +35,7 @@
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
+#include <asm/kmsan.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>
@@ -168,12 +169,14 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
PUSH_AND_CLEAR_REGS rax=$-ENOSYS
TRACE_IRQS_OFF
+ KMSAN_SYSCALL_ENTER
/* IRQs are off. */
movq %rax, %rdi
movq %rsp, %rsi
call do_syscall_64 /* returns with IRQs disabled */
+ KMSAN_SYSCALL_EXIT
TRACE_IRQS_IRETQ /* we're about to change IF */
/*
@@ -575,6 +578,7 @@ ENTRY(interrupt_entry)
1:
ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
+ KMSAN_INTERRUPT_ENTER
/* We entered an interrupt context - irqs are off: */
TRACE_IRQS_OFF
@@ -604,12 +608,14 @@ common_interrupt:
addq $-0x80, (%rsp) /* Adjust vector to [-256, -1] range */
call interrupt_entry
UNWIND_HINT_REGS indirect=1
+ KMSAN_UNPOISON_PT_REGS
call do_IRQ /* rdi points to pt_regs */
/* 0(%rsp): old RSP */
ret_from_intr:
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
+ KMSAN_INTERRUPT_EXIT
LEAVE_IRQ_STACK
testb $3, CS(%rsp)
@@ -802,6 +808,7 @@ ENTRY(\sym)
.Lcommon_\sym:
call interrupt_entry
UNWIND_HINT_REGS indirect=1
+ KMSAN_UNPOISON_PT_REGS
call \do_sym /* rdi points to pt_regs */
jmp ret_from_intr
END(\sym)
@@ -909,15 +916,18 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
.if \shift_ist != -1
subq $\ist_offset, CPU_TSS_IST(\shift_ist)
+ KMSAN_IST_ENTER(\shift_ist)
.endif
.if \read_cr2
movq %r12, %rdx /* Move CR2 into 3rd argument */
.endif
+ KMSAN_UNPOISON_PT_REGS
call \do_sym
.if \shift_ist != -1
+ KMSAN_IST_EXIT(\shift_ist)
addq $\ist_offset, CPU_TSS_IST(\shift_ist)
.endif
@@ -1079,7 +1089,9 @@ ENTRY(do_softirq_own_stack)
pushq %rbp
mov %rsp, %rbp
ENTER_IRQ_STACK regs=0 old_rsp=%r11
+ KMSAN_SOFTIRQ_ENTER
call __do_softirq
+ KMSAN_SOFTIRQ_EXIT
LEAVE_IRQ_STACK regs=0
leaveq
ret
@@ -1466,9 +1478,12 @@ ENTRY(nmi)
* done with the NMI stack.
*/
+ KMSAN_NMI_ENTER
movq %rsp, %rdi
movq $-1, %rsi
+ KMSAN_UNPOISON_PT_REGS
call do_nmi
+ KMSAN_NMI_EXIT
/*
* Return back to user mode. We must *not* do the normal exit
@@ -1678,10 +1693,13 @@ end_repeat_nmi:
call paranoid_entry
UNWIND_HINT_REGS
+ KMSAN_NMI_ENTER
/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
movq $-1, %rsi
+ KMSAN_UNPOISON_PT_REGS
call do_nmi
+ KMSAN_NMI_EXIT
/* Always restore stashed CR3 value (see paranoid_entry) */
RESTORE_CR3 scratch_reg=%r15 save_reg=%r14
@@ -7,18 +7,20 @@
#endif
#ifdef CONFIG_KASAN
-#define KASAN_STACK_ORDER 1
+#define EXTRA_STACK_ORDER 1
+#elif defined(CONFIG_KMSAN)
+#define EXTRA_STACK_ORDER 2
#else
-#define KASAN_STACK_ORDER 0
+#define EXTRA_STACK_ORDER 0
#endif
-#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
+#define THREAD_SIZE_ORDER (2 + EXTRA_STACK_ORDER)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
-#define EXCEPTION_STACK_ORDER (0 + KASAN_STACK_ORDER)
+#define EXCEPTION_STACK_ORDER (0 + EXTRA_STACK_ORDER)
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-#define IRQ_STACK_ORDER (2 + KASAN_STACK_ORDER)
+#define IRQ_STACK_ORDER (2 + EXTRA_STACK_ORDER)
#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
/*
@@ -78,6 +78,13 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
+#ifdef CONFIG_KMSAN
+ /*
+ * Merging consecutive physical pages may not work correctly under KMSAN
+ * if their metadata pages aren't consecutive. Just disable merging.
+ */
+ return false;
+#endif
if (addr1 + vec1->bv_len != addr2)
return false;
if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
@@ -216,6 +216,15 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef CONFIG_KMSAN
+ /*
+ * Bits in struct page are scarce, so the LSB in *shadow is used to
+ * indicate whether the page should be ignored by KMSAN or not.
+ */
+ struct page *shadow;
+ struct page *origin;
+#endif
+
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
int _last_cpupid;
#endif
@@ -15,6 +15,7 @@
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
+#include <linux/kmsan.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
@@ -1172,6 +1173,10 @@ struct task_struct {
unsigned int kasan_depth;
#endif
+#ifdef CONFIG_KMSAN
+ struct kmsan_task_state kmsan;
+#endif
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack: */
int curr_ret_stack;
@@ -32,6 +32,7 @@
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
+#include <linux/kmsan.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
@@ -554,6 +555,7 @@ static void __init mm_init(void)
*/
page_ext_init_flatmem();
report_meminit();
+ kmsan_initialize_shadow();
mem_init();
kmem_cache_init();
kmemleak_init();
@@ -625,6 +627,7 @@ asmlinkage __visible void __init start_kernel(void)
sort_main_extable();
trap_init();
mm_init();
+ kmsan_initialize();
ftrace_init();
@@ -233,8 +233,8 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
if (parts.slabindex > depot_index) {
WARN(1, "slab index %d out of bounds (%d) for stack id %08x\n",
parts.slabindex, depot_index, handle);
- __memset(trace, 0, sizeof(*trace));
- return;
+ *entries = NULL;
+ return 0;
}
slab = stack_slabs[parts.slabindex];
stack = slab + offset;
@@ -299,7 +299,8 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
* contexts and I/O.
*/
alloc_flags &= ~GFP_ZONEMASK;
- alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+ alloc_flags &= (GFP_ATOMIC | GFP_KERNEL |
+ __GFP_NO_KMSAN_SHADOW);
alloc_flags |= __GFP_NOWARN;
page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
if (page)
@@ -71,6 +71,7 @@ obj-$(CONFIG_PAGE_POISONING) += page_poison.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_KASAN) += kasan/
+obj-$(CONFIG_KMSAN) += kmsan/
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_MEMTEST) += memtest.o
@@ -140,6 +140,12 @@ _c_flags += $(if $(patsubst n%,, \
$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
endif
+ifeq ($(CONFIG_KMSAN),y)
+_c_flags += $(if $(patsubst n%,, \
+ $(KMSAN_SANITIZE_$(basetarget).o)$(KMSAN_SANITIZE)y), \
+ $(CFLAGS_KMSAN))
+endif
+
ifeq ($(CONFIG_UBSAN),y)
_c_flags += $(if $(patsubst n%,, \
$(UBSAN_SANITIZE_$(basetarget).o)$(UBSAN_SANITIZE)$(CONFIG_UBSAN_SANITIZE_ALL)), \
Do the following to make KMSAN usable:

 - add KMSAN declarations to struct page and struct task_struct;
 - add Makefile.kmsan to the top-level Makefile;
 - call KMSAN initialization from init/main.c;
 - add asm hooks to arch/x86/entry/entry_64.S;
 - increase task stack size under KMSAN;
 - disable page merging in block/blk.h;
 - disable CONFIG_KMSAN in arch/x86/boot/compressed/misc.h to use default
   string functions instead of KMSAN ones.

Signed-off-by: Alexander Potapenko <glider@google.com>
To: Alexander Potapenko <glider@google.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Vegard Nossum <vegard.nossum@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: linux-mm@kvack.org

---
Change-Id: I37e0b7f2d2f2b0aeac5753ff9d6b411485fc374e
---
 Makefile                             |  3 ++-
 arch/x86/boot/compressed/misc.h      |  1 +
 arch/x86/entry/entry_64.S            | 18 ++++++++++++++++++
 arch/x86/include/asm/page_64_types.h | 12 +++++++-----
 block/blk.h                          |  7 +++++++
 include/linux/mm_types.h             |  9 +++++++++
 include/linux/sched.h                |  5 +++++
 init/main.c                          |  3 +++
 lib/stackdepot.c                     |  7 ++++---
 mm/Makefile                          |  1 +
 scripts/Makefile.lib                 |  6 ++++++
 11 files changed, 63 insertions(+), 9 deletions(-)