@@ -172,21 +172,10 @@ void __init kasan_init(void)
 	phys_addr_t p_start, p_end;
 	u64 i;
 
-	/*
-	 * Populate all kernel virtual address space with kasan_early_shadow_page
-	 * except for the linear mapping and the modules/kernel/BPF mapping.
-	 */
-	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-				    (void *)kasan_mem_to_shadow((void *)
-								VMEMMAP_END));
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
 		kasan_shallow_populate(
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
 			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
-	else
-		kasan_populate_early_shadow(
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
-			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
 	/* Populate the linear mapping */
 	for_each_mem_range(i, &p_start, &p_end) {
When this function is called, all the shadow memory is already populated with kasan_early_shadow_pte, which has PAGE_KERNEL protection. kasan_populate_early_shadow write-protects the mapping of the address range passed as argument in zero_pte_populate, which in fact write-protects the whole shadow memory mapping, since kasan_early_shadow_pte backs all of the shadow memory at this point. Then, when the memblock API is used to populate the shadow memory, the first write access to the kernel stack triggers a trap. This becomes visible with the next commit, which contains a fix for asan-stack.

We already manually populate all the shadow memory in kasan_early_init and we write-protect kasan_early_shadow_pte at the end of kasan_init, which makes the calls to kasan_populate_early_shadow superfluous, so we can remove them.

Signed-off-by: Alexandre Ghiti <alexandre.ghiti@canonical.com>
---
 arch/riscv/mm/kasan_init.c | 11 -----------
 1 file changed, 11 deletions(-)
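
Note for reviewers: the write-protection described above happens in the generic zero_pte_populate() helper in mm/kasan/init.c, which kasan_populate_early_shadow() ends up calling. The following is only a simplified sketch of that helper, paraphrased from the generic KASAN code (not part of this patch, and details may differ between kernel versions), to illustrate why the whole early shadow ends up read-only:

static int __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				   unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	/* Build a PTE pointing at the shared kasan_early_shadow_page... */
	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
			   PAGE_KERNEL);
	/*
	 * ...and write-protect it. Since kasan_early_shadow_pte backs all
	 * of the shadow memory at this point, the shared mapping becomes
	 * read-only for the entire shadow, not just the requested range.
	 */
	zero_pte = pte_wrprotect(zero_pte);

	/* Point every PTE of the requested range at the read-only page. */
	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}

	return 0;
}

With asan-stack enabled, the first instrumented write to the stack shadow then faults on this read-only mapping, which is why dropping the redundant kasan_populate_early_shadow() calls (the shadow is already populated in kasan_early_init and write-protected at the end of kasan_init) avoids the trap.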