| Message ID | 20210508032912.2693212-1-liushixin2@huawei.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | [RFC] riscv: Enable KFENCE for riscv64 |
On Sat, 8 May 2021 at 04:56, Liu Shixin <liushixin2@huawei.com> wrote:
>
> Add architecture specific implementation details for KFENCE and enable
> KFENCE for the riscv64 architecture. In particular, this implements the
> required interface in <asm/kfence.h>.

Nice to see KFENCE on more architectures.

> KFENCE requires that attributes for pages from its memory pool can
> individually be set. Therefore, force the kfence pool to be mapped at
> page granularity.
>
> I tested this patch using the testcases in kfence_test.c and all passed.
>
> Signed-off-by: Liu Shixin <liushixin2@huawei.com>

[...]

> diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
> new file mode 100644
> index 000000000000..590c5b7e3514
> --- /dev/null
> +++ b/arch/riscv/include/asm/kfence.h
> @@ -0,0 +1,51 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef _ASM_RISCV_KFENCE_H
> +#define _ASM_RISCV_KFENCE_H
> +
> +#include <linux/pfn.h>
> +#include <linux/slab.h>
> +#include <linux/kfence.h>
> +#include <asm/pgtable.h>
> +
> +static inline bool arch_kfence_init_pool(void)
> +{
> +        int i;
> +        unsigned long addr;
> +        pte_t *pte;
> +        pmd_t *pmd;
> +
> +        for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
> +             addr += PAGE_SIZE) {
> +                pte = virt_to_kpte(addr);
> +                pmd = pmd_off_k(addr);
> +
> +                if (!pmd_leaf(*pmd) && pte_present(*pte))
> +                        continue;
> +
> +                pte = kmalloc(PAGE_SIZE, GFP_ATOMIC);

Using kmalloc() to allocate pte looks weird. Does riscv have helpers
for allocating pte? Otherwise, __get_free_page() perhaps?

> +                for (i = 0; i < PTRS_PER_PTE; i++)
> +                        set_pte(pte + i, pfn_pte(PFN_DOWN(__pa((addr & PMD_MASK) + i * PAGE_SIZE)), PAGE_KERNEL));
> +
> +                set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
> +                flush_tlb_kernel_range(addr, addr + PMD_SIZE);
> +        }
> +
> +        return true;
> +}
> +
> +static inline bool kfence_protect_page(unsigned long addr, bool protect)
> +{
> +        pte_t *pte = virt_to_kpte(addr);
> +
> +        if (protect)
> +                set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
> +        else
> +                set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
> +
> +        flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
> +
> +        return true;
> +}

[...]
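For readers following the thread, a minimal sketch of what the reviewer's suggestion could look like: the PTE table comes from __get_free_page(), which hands back a page-sized, page-aligned block, and allocation failure is checked (the posted patch dereferences the result unconditionally). This is an untested illustration of the suggestion, not a revision from the series.

```c
/*
 * Sketch of arch_kfence_init_pool() with the review suggestion applied:
 * allocate the PTE table with __get_free_page() instead of kmalloc(),
 * and fail the init if the allocation fails. Untested; not part of the
 * posted series.
 */
static inline bool arch_kfence_init_pool(void)
{
	unsigned long addr;
	pte_t *pte;
	pmd_t *pmd;
	int i;

	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
	     addr += PAGE_SIZE) {
		pte = virt_to_kpte(addr);
		pmd = pmd_off_k(addr);

		/* Already mapped at page granularity and present: nothing to split. */
		if (!pmd_leaf(*pmd) && pte_present(*pte))
			continue;

		pte = (pte_t *)__get_free_page(GFP_ATOMIC);
		if (!pte)
			return false;

		/* Rebuild the 2MB leaf mapping as PTRS_PER_PTE individual 4KB PTEs. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(pte + i,
				pfn_pte(PFN_DOWN(__pa((addr & PMD_MASK) + i * PAGE_SIZE)),
					PAGE_KERNEL));

		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
		flush_tlb_kernel_range(addr, addr + PMD_SIZE);
	}

	return true;
}
```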
```diff
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index c426e7d20907..000d8aba1030 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -64,6 +64,7 @@ config RISCV
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if MMU && 64BIT
 	select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
+	select HAVE_ARCH_KFENCE if MMU && 64BIT
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KGDB_QXFER_PKT
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
new file mode 100644
index 000000000000..590c5b7e3514
--- /dev/null
+++ b/arch/riscv/include/asm/kfence.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_KFENCE_H
+#define _ASM_RISCV_KFENCE_H
+
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/kfence.h>
+#include <asm/pgtable.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+	int i;
+	unsigned long addr;
+	pte_t *pte;
+	pmd_t *pmd;
+
+	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+	     addr += PAGE_SIZE) {
+		pte = virt_to_kpte(addr);
+		pmd = pmd_off_k(addr);
+
+		if (!pmd_leaf(*pmd) && pte_present(*pte))
+			continue;
+
+		pte = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(pte + i, pfn_pte(PFN_DOWN(__pa((addr & PMD_MASK) + i * PAGE_SIZE)), PAGE_KERNEL));
+
+		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE));
+		flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+	}
+
+	return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+	pte_t *pte = virt_to_kpte(addr);
+
+	if (protect)
+		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+	else
+		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+	return true;
+}
+
+#endif /* _ASM_RISCV_KFENCE_H */
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 096463cc6fff..aa08dd2f8fae 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/signal.h>
 #include <linux/uaccess.h>
 #include <linux/kprobes.h>
+#include <linux/kfence.h>
 
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
@@ -45,7 +46,15 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-	msg = (addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request";
+	if (addr < PAGE_SIZE)
+		msg = "NULL pointer dereference";
+	else {
+		if (kfence_handle_page_fault(addr, regs->cause == EXC_STORE_PAGE_FAULT, regs))
+			return;
+
+		msg = "paging request";
+	}
+
 	die_kernel_fault(msg, addr, regs);
 }
```
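To see what the new no_context() branch buys in practice, consider a hypothetical overflowing allocation; the function below is invented for illustration and is not part of the series. When the allocation happens to be sampled into the KFENCE pool, the out-of-bounds store is reported instead of oopsing: depending on where the object sits in its page, the write either faults on the neighbouring guard page (whose PTE kfence_protect_page() has cleared) and is claimed by kfence_handle_page_fault(), or corrupts a redzone canary that KFENCE reports at kfree() time.

```c
#include <linux/slab.h>

/* Hypothetical buggy code path, for illustration only. */
static void kfence_oob_demo(void)
{
	char *buf = kmalloc(32, GFP_KERNEL);

	if (!buf)
		return;

	/*
	 * One byte past the end of the allocation. If buf came from the
	 * KFENCE pool and is aligned against a guard page, this store
	 * traps (cause == EXC_STORE_PAGE_FAULT on riscv) and the new
	 * kfence_handle_page_fault() call reports it instead of letting
	 * no_context() kill the kernel.
	 */
	buf[32] = 0;

	kfree(buf);
}
```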
Add architecture specific implementation details for KFENCE and enable
KFENCE for the riscv64 architecture. In particular, this implements the
required interface in <asm/kfence.h>.

KFENCE requires that attributes for pages from its memory pool can
individually be set. Therefore, force the kfence pool to be mapped at
page granularity.

I tested this patch using the testcases in kfence_test.c and all passed.

Signed-off-by: Liu Shixin <liushixin2@huawei.com>
---
 arch/riscv/Kconfig              |  1 +
 arch/riscv/include/asm/kfence.h | 51 +++++++++++++++++++++++++++++++++
 arch/riscv/mm/fault.c           | 11 ++++++-
 3 files changed, 62 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/include/asm/kfence.h
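For anyone wanting to reproduce the author's test run: a sketch of a config fragment, assuming the generic KFENCE symbols from lib/Kconfig.kfence as they existed when this was posted (kfence_test.c builds as the KFENCE KUnit test). The thread does not give the author's actual configuration; the values below are illustrative defaults.

```
# Illustrative .config fragment, not taken from the thread:
CONFIG_KFENCE=y
CONFIG_KFENCE_SAMPLE_INTERVAL=100
CONFIG_KUNIT=y
CONFIG_KFENCE_KUNIT_TEST=y
```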