@@ -22,8 +22,8 @@ architectures, but it has significant performance and memory overheads.
Software Tag-Based KASAN or SW_TAGS KASAN, enabled with CONFIG_KASAN_SW_TAGS,
can be used for both debugging and dogfood testing, similar to userspace HWASan.
-This mode is only supported for arm64, but its moderate memory overhead allows
-using it for testing on memory-restricted devices with real workloads.
+This mode is only supported on arm64 and riscv, but its moderate memory overhead
+allows using it for testing on memory-restricted devices with real workloads.
Hardware Tag-Based KASAN or HW_TAGS KASAN, enabled with CONFIG_KASAN_HW_TAGS,
is the mode intended to be used as an in-field memory bug detector or as a
@@ -340,12 +340,14 @@ Software Tag-Based KASAN
~~~~~~~~~~~~~~~~~~~~~~~~
Software Tag-Based KASAN uses a software memory tagging approach to checking
-access validity. It is currently only implemented for the arm64 architecture.
+access validity. It is currently only implemented for the arm64 and riscv
+architectures.
Software Tag-Based KASAN uses the Top Byte Ignore (TBI) feature of arm64 CPUs
-to store a pointer tag in the top byte of kernel pointers. It uses shadow memory
-to store memory tags associated with each 16-byte memory cell (therefore, it
-dedicates 1/16th of the kernel memory for shadow memory).
+or the pointer masking (Sspm) feature of RISC-V CPUs to store a pointer tag in
+the top byte of kernel pointers. It uses shadow memory to store memory tags
+associated with each 16-byte memory cell (therefore, it dedicates 1/16th of the
+kernel memory for shadow memory).
On each memory allocation, Software Tag-Based KASAN generates a random tag, tags
the allocated memory with this tag, and embeds the same tag into the returned
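For illustration only (not part of this patch): the check that tag-based
KASAN performs on each instrumented access boils down to comparing the
pointer's tag against the tag stored in shadow memory. A sketch of the idea,
using the helpers added later in this patch plus the generic
kasan_mem_to_shadow():

/* Illustrative sketch only: what a software tag check boils down to. */
static bool tag_check_passes(const void *ptr)
{
	u8 tag = __tag_get(ptr);

	if (tag == KASAN_TAG_KERNEL)	/* match-all tag of native pointers */
		return true;

	return *(u8 *)kasan_mem_to_shadow(__tag_reset(ptr)) == tag;
}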
@@ -117,6 +117,7 @@ config RISCV
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KASAN_SW_TAGS if MMU && 64BIT
select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
select HAVE_ARCH_KFENCE if MMU && 64BIT
select HAVE_ARCH_KGDB if !XIP_KERNEL
@@ -277,7 +278,8 @@ config PAGE_OFFSET
config KASAN_SHADOW_OFFSET
hex
- depends on KASAN_GENERIC
+ depends on KASAN
+ default 0xffffffff00000000 if KASAN_SW_TAGS
default 0xdfffffff00000000 if 64BIT
default 0xffffffff if 32BIT
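For reference, these offsets feed the generic address-to-shadow translation
in include/linux/kasan.h; with the SW_TAGS scale shift of 4 added below in
asm/kasan.h, each shadow byte then covers one 16-byte granule:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}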
@@ -16,6 +16,10 @@
#define ARCH_KMALLOC_MINALIGN (8)
#endif
+#ifdef CONFIG_KASAN_SW_TAGS
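+/*
+ * Match the 16-byte tag granule (1 << KASAN_SHADOW_SCALE_SHIFT) so that no
+ * two slab objects ever share a granule, letting each carry a distinct tag.
+ */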
+#define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#endif
+
/*
* RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
* the flat loader aligns it accordingly.
@@ -25,7 +25,11 @@
* KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
* (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
*/
+#if defined(CONFIG_KASAN_GENERIC)
#define KASAN_SHADOW_SCALE_SHIFT 3
+#elif defined(CONFIG_KASAN_SW_TAGS)
+#define KASAN_SHADOW_SCALE_SHIFT 4
+#endif
#define KASAN_SHADOW_SIZE (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
/*
@@ -37,6 +41,14 @@
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_KERNEL 0x7f /* native kernel pointers tag */
+#endif
+
+#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
+#define arch_kasan_reset_tag(addr) __tag_reset(addr)
+#define arch_kasan_get_tag(addr) __tag_get(addr)
+
void kasan_init(void);
asmlinkage void kasan_early_init(void);
void kasan_swapper_init(void);
@@ -48,5 +60,13 @@ void kasan_swapper_init(void);
#endif /* CONFIG_KASAN */
+#ifdef CONFIG_KASAN_SW_TAGS
+bool kasan_boot_cpu_enabled(void);
+int kasan_cpu_enable(void);
+#else
+static inline bool kasan_boot_cpu_enabled(void) { return false; }
+static inline int kasan_cpu_enable(void) { return 0; }
+#endif
+
#endif
#endif /* __ASM_KASAN_H */
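The intended call pattern for these two helpers is visible in the setup.c
and smpboot.c hunks further down; in sketch form:

	/* Boot CPU (smp_prepare_boot_cpu): set up generic SW_TAGS state. */
	if (kasan_boot_cpu_enabled())
		kasan_init_sw_tags();

	/* Secondary CPUs (smp_callin): a failed enable excludes the CPU. */
	if (kasan_boot_cpu_enabled() && kasan_cpu_enable())
		return;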
@@ -89,6 +89,16 @@ typedef struct page *pgtable_t;
#define PTE_FMT "%08lx"
#endif
+#ifdef CONFIG_KASAN_SW_TAGS
+#define __tag_set(addr, tag) ((void *)((((u64)(addr) << 7) >> 7) | ((u64)(tag) << 57)))
+#define __tag_reset(addr) ((void *)((s64)((u64)(addr) << 7) >> 7))
+#define __tag_get(addr) ((u8)((u64)(addr) >> 57))
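+/*
+ * Worked example: with PMLEN=7 the tag lives in bits 63:57; bit 56 is
+ * preserved by __tag_set() and sign-extended by __tag_reset(), so untagging
+ * a kernel pointer restores its canonical form:
+ *   __tag_set(0xff60000012345678, 0x2b) == 0x5760000012345678
+ *   __tag_get(0x5760000012345678)       == 0x2b
+ *   __tag_reset(0x5760000012345678)     == 0xff60000012345678
+ */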
+#else
+#define __tag_set(addr, tag) (addr)
+#define __tag_reset(addr) (addr)
+#define __tag_get(addr) 0
+#endif
+
#if defined(CONFIG_64BIT) && defined(CONFIG_MMU)
/*
* We override this value as its generic definition uses __pa too early in
@@ -155,7 +165,7 @@ phys_addr_t linear_mapping_va_to_pa(unsigned long x);
})
#define __va_to_pa_nodebug(x) ({ \
- unsigned long _x = x; \
+ unsigned long _x = (unsigned long)__tag_reset(x); \
is_linear_mapping(_x) ? \
linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
})
@@ -179,7 +189,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
-#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+#define page_to_virt(page) ({ \
+ __typeof__(page) __page = page; \
+ __tag_set(pfn_to_virt(page_to_pfn(__page)), page_kasan_tag(__page)); \
+})
#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
@@ -196,7 +209,7 @@ static __always_inline void *pfn_to_kaddr(unsigned long pfn)
#endif /* __ASSEMBLY__ */
#define virt_addr_valid(vaddr) ({ \
- unsigned long _addr = (unsigned long)vaddr; \
+ unsigned long _addr = (unsigned long)__tag_reset(vaddr); \
(unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
})
@@ -910,7 +910,13 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/
#ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
+/*
+ * When pointer masking is enabled for the kernel's privilege mode,
+ * __access_ok() must reject tagged aliases of kernel memory.
+ */
+#ifndef CONFIG_KASAN_SW_TAGS
#define TASK_SIZE_MAX LONG_MAX
+#endif
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
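To see why hiding TASK_SIZE_MAX is enough, recall the generic check
(paraphrased from include/asm-generic/access_ok.h): when an architecture
does not define TASK_SIZE_MAX, it falls back to TASK_SIZE, which rejects
the tagged aliases described above:

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX	TASK_SIZE
#endif

static inline int __access_ok(const void __user *ptr, unsigned long size)
{
	unsigned long addr = (unsigned long)ptr;

	/*
	 * With TASK_SIZE_MAX == LONG_MAX, any pointer with bit 63 clear
	 * passes, including tagged aliases of kernel memory, which become
	 * dereferenceable once pointer masking is enabled.
	 */
	return size <= TASK_SIZE_MAX && addr <= TASK_SIZE_MAX - size;
}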
@@ -31,14 +31,14 @@ static inline void local_flush_tlb_all_asid(unsigned long asid)
/* Flush one page from local TLB */
static inline void local_flush_tlb_page(unsigned long addr)
{
- ALT_SFENCE_VMA_ADDR(addr);
+ ALT_SFENCE_VMA_ADDR(__tag_reset(addr));
}
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
if (asid != FLUSH_TLB_NO_ASID)
- ALT_SFENCE_VMA_ADDR_ASID(addr, asid);
+ ALT_SFENCE_VMA_ADDR_ASID(__tag_reset(addr), asid);
else
local_flush_tlb_page(addr);
}
@@ -299,6 +299,12 @@ void __init setup_arch(char **cmdline_p)
riscv_user_isa_enable();
}
+void __init smp_prepare_boot_cpu(void)
+{
+ if (kasan_boot_cpu_enabled())
+ kasan_init_sw_tags();
+}
+
bool arch_cpu_is_hotpluggable(int cpu)
{
return cpu_has_hotplug(cpu);
@@ -29,6 +29,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/irq.h>
+#include <asm/kasan.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
@@ -210,7 +211,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
asmlinkage __visible void smp_callin(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int curr_cpuid = smp_processor_id();
+ unsigned int curr_cpuid;
+
+ /* Must be called first, before referencing any dynamic allocations */
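+	/* On failure, the early return leaves this CPU out of the system. */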
+ if (kasan_boot_cpu_enabled() && kasan_cpu_enable())
+ return;
if (has_vector()) {
/*
@@ -225,6 +230,7 @@ asmlinkage __visible void smp_callin(void)
mmgrab(mm);
current->active_mm = mm;
+ curr_cpuid = smp_processor_id();
store_cpu_topology(curr_cpuid);
notify_cpu_starting(curr_cpuid);
@@ -20,3 +20,5 @@ lib-$(CONFIG_RISCV_ISA_ZBC) += crc32.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RISCV_ISA_V) += xor.o
lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o
+
+obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o
new file mode 100644
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google LLC
+ * Copyright (C) 2024 SiFive
+ */
+
+#include <linux/export.h>
+#include <linux/linkage.h>
+
+/*
+ * Report a tag mismatch detected by tag-based KASAN.
+ *
+ * A compiler-generated thunk calls this with a custom calling convention.
+ * Upon entry to this function, the following registers have been modified:
+ *
+ * x1/ra: clobbered by call to this function
+ * x2/sp: decremented by 256
+ * x6/t1: tag from shadow memory
+ * x7/t2: tag from pointer
+ * x10/a0: fault address
+ * x11/a1: fault description
+ * x28/t3: clobbered by thunk
+ * x29/t4: clobbered by thunk
+ * x30/t5: clobbered by thunk
+ * x31/t6: clobbered by thunk
+ *
+ * The caller has decremented the SP by 256 bytes, and stored the following
+ * registers in slots on the stack according to their number (sp + 8 * xN):
+ *
+ * x1/ra: return address to user code
+ * x8/s0/fp: saved value from user code
+ * x10/a0: saved value from user code
+ * x11/a1: saved value from user code
+ */
+SYM_CODE_START(__hwasan_tag_mismatch)
+ /* Store the remaining unclobbered caller-saved regs */
+ sd t0, (8 * 5)(sp)
+ sd a2, (8 * 12)(sp)
+ sd a3, (8 * 13)(sp)
+ sd a4, (8 * 14)(sp)
+ sd a5, (8 * 15)(sp)
+ sd a6, (8 * 16)(sp)
+ sd a7, (8 * 17)(sp)
+
+ /* a0 and a1 are already set by the thunk */
+ ld a2, (8 * 1)(sp)
+ call kasan_tag_mismatch
+
+ ld ra, (8 * 1)(sp)
+ ld t0, (8 * 5)(sp)
+ ld a0, (8 * 10)(sp)
+ ld a1, (8 * 11)(sp)
+ ld a2, (8 * 12)(sp)
+ ld a3, (8 * 13)(sp)
+ ld a4, (8 * 14)(sp)
+ ld a5, (8 * 15)(sp)
+ ld a6, (8 * 16)(sp)
+ ld a7, (8 * 17)(sp)
+ addi sp, sp, 256
+ ret
+SYM_CODE_END(__hwasan_tag_mismatch)
+EXPORT_SYMBOL(__hwasan_tag_mismatch)
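For context, the common-code handler that this trampoline calls is roughly
the following (mm/kasan/sw_tags.c in current mainline; a1's fault
description encodes the access size and direction):

void kasan_tag_mismatch(void *addr, unsigned long access_info,
			unsigned long ret_ip)
{
	kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10,
		     ret_ip);
}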
@@ -11,6 +11,10 @@
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
+#ifdef CONFIG_KASAN_SW_TAGS
+static bool __kasan_boot_cpu_enabled __ro_after_init;
+#endif
+
/*
* Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
* which is right before the kernel.
@@ -323,8 +327,11 @@ asmlinkage void __init kasan_early_init(void)
{
uintptr_t i;
- BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
- KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+ if (IS_ENABLED(CONFIG_KASAN_GENERIC))
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+ KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+ else
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END);
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_early_shadow_pte + i,
@@ -356,6 +363,8 @@ asmlinkage void __init kasan_early_init(void)
KASAN_SHADOW_START, KASAN_SHADOW_END);
local_flush_tlb_all();
+
+ __kasan_boot_cpu_enabled = !kasan_cpu_enable();
}
void __init kasan_swapper_init(void)
@@ -534,3 +543,20 @@ void __init kasan_init(void)
csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | satp_mode);
local_flush_tlb_all();
}
+
+#ifdef CONFIG_KASAN_SW_TAGS
+bool kasan_boot_cpu_enabled(void)
+{
+ return __kasan_boot_cpu_enabled;
+}
+
+int kasan_cpu_enable(void)
+{
+ struct sbiret ret;
+
+ /* sbi_fwft_set(POINTER_MASKING_PMLEN, 7, 0); */
+ ret = sbi_ecall(0x46574654, 0, 5, 7, 0, 0, 0, 0);
+
+ return sbi_err_map_linux_errno(ret.error);
+}
+#endif
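Spelled out with named constants, the ecall above is the SBI Firmware
Features (FWFT) "set" call; the identifiers below are illustrative (the
values come from the FWFT spec and match the magic numbers used above):

#define SBI_EXT_FWFT			0x46574654	/* "FWFT" in ASCII */
#define SBI_FWFT_SET			0		/* function ID */
#define SBI_FWFT_POINTER_MASKING_PMLEN	5		/* feature ID */

/* Ask for PMLEN >= 7, i.e. a 7-bit tag in bits 63:57; no flags. */
ret = sbi_ecall(SBI_EXT_FWFT, SBI_FWFT_SET,
		SBI_FWFT_POINTER_MASKING_PMLEN, 7, 0, 0, 0, 0);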
@@ -8,6 +8,8 @@
phys_addr_t __virt_to_phys(unsigned long x)
{
+	x = (unsigned long)__tag_reset(x);
+
/*
	 * Boundary checking against the kernel linear mapping space.
*/
@@ -24,6 +26,8 @@ phys_addr_t __phys_addr_symbol(unsigned long x)
unsigned long kernel_start = kernel_map.virt_addr;
unsigned long kernel_end = kernel_start + kernel_map.size;
+	x = (unsigned long)__tag_reset(x);
+
/*
	 * Boundary checking against the kernel image mapping.
* __pa_symbol should only be used on kernel symbol addresses.
Implement support for software tag-based KASAN using the RISC-V pointer masking extension, which supports 7 and/or 16-bit tags. This implemen- tation uses 7-bit tags, so it is compatible with either hardware mode. Pointer masking is an optional ISA extension, and it must be enabled using an SBI call to firmware on each CPU. This SBI call must be made very early in smp_callin(), as dereferencing any tagged pointers before that point will crash the kernel. If the SBI call fails on the boot CPU, then KASAN is globally disabled, and the kernel boots normally (unless stack tagging is enabled). If the SBI call fails on any other CPU, that CPU is excluded from the system. When pointer masking is enabled for the kernel's privilege mode, it must be more careful about accepting tagged pointers from userspace. Normally, __access_ok() accepts tagged aliases of kernel memory as long as the MSB is zero, since those addresses cannot be dereferenced -- they will cause a page fault in the uaccess routines. But when the kernel is using pointer masking, those addresses are dereferenceable, so __access_ok() must specifically check the most-significant non-tag bit. Pointer masking does not apply to the operands of fence instructions, so software is responsible for untagging those addresses. Signed-off-by: Samuel Holland <samuel.holland@sifive.com> --- Documentation/dev-tools/kasan.rst | 14 ++++--- arch/riscv/Kconfig | 4 +- arch/riscv/include/asm/cache.h | 4 ++ arch/riscv/include/asm/kasan.h | 20 ++++++++++ arch/riscv/include/asm/page.h | 19 ++++++++-- arch/riscv/include/asm/pgtable.h | 6 +++ arch/riscv/include/asm/tlbflush.h | 4 +- arch/riscv/kernel/setup.c | 6 +++ arch/riscv/kernel/smpboot.c | 8 +++- arch/riscv/lib/Makefile | 2 + arch/riscv/lib/kasan_sw_tags.S | 61 +++++++++++++++++++++++++++++++ arch/riscv/mm/kasan_init.c | 30 ++++++++++++++- arch/riscv/mm/physaddr.c | 4 ++ 13 files changed, 167 insertions(+), 15 deletions(-) create mode 100644 arch/riscv/lib/kasan_sw_tags.S