Message ID | 20200217083223.2011-2-zong.li@sifive.com
---|---
State | New, archived
Series | Support strict kernel memory permissions for security
On Mon, 17 Feb 2020 00:32:16 PST (-0800), zong.li@sifive.com wrote:
> Add set_memory_ro/rw/x/nx architecture hooks to change the page
> attribution.
>
> Use own set_memory.h rather than generic set_memory.h
> (i.e. include/asm-generic/set_memory.h), because we want to add other
> function prototypes here.
>
> Signed-off-by: Zong Li <zong.li@sifive.com>
> ---
>  arch/riscv/Kconfig                  |   1 +
>  arch/riscv/include/asm/set_memory.h |  17 ++++
>  arch/riscv/mm/Makefile              |   1 +
>  arch/riscv/mm/pageattr.c            | 150 ++++++++++++++++++++++++++++
>  4 files changed, 169 insertions(+)
>  create mode 100644 arch/riscv/include/asm/set_memory.h
>  create mode 100644 arch/riscv/mm/pageattr.c
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 6e81da55b5e4..76ed36543b3a 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -60,6 +60,7 @@ config RISCV
>  	select HAVE_EBPF_JIT if 64BIT
>  	select EDAC_SUPPORT
>  	select ARCH_HAS_GIGANTIC_PAGE
> +	select ARCH_HAS_SET_MEMORY
>  	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
>  	select SPARSEMEM_STATIC if 32BIT
>  	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
> diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
> new file mode 100644
> index 000000000000..936f08063566
> --- /dev/null
> +++ b/arch/riscv/include/asm/set_memory.h
> @@ -0,0 +1,17 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2019 SiFive
> + */
> +
> +#ifndef _ASM_RISCV_SET_MEMORY_H
> +#define _ASM_RISCV_SET_MEMORY_H
> +
> +/*
> + * Functions to change memory attributes.
> + */
> +int set_memory_ro(unsigned long addr, int numpages);
> +int set_memory_rw(unsigned long addr, int numpages);
> +int set_memory_x(unsigned long addr, int numpages);
> +int set_memory_nx(unsigned long addr, int numpages);
> +
> +#endif /* _ASM_RISCV_SET_MEMORY_H */
> diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
> index 814e16a8d68a..b94643aee84c 100644
> --- a/arch/riscv/mm/Makefile
> +++ b/arch/riscv/mm/Makefile
> @@ -10,6 +10,7 @@ obj-y += extable.o
>  obj-$(CONFIG_MMU) += fault.o
>  obj-y += cacheflush.o
>  obj-y += context.o
> +obj-y += pageattr.o
>
>  ifeq ($(CONFIG_MMU),y)
>  obj-$(CONFIG_SMP) += tlbflush.o
> diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
> new file mode 100644
> index 000000000000..fcd59ef2835b
> --- /dev/null
> +++ b/arch/riscv/mm/pageattr.c
> @@ -0,0 +1,150 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2019 SiFive
> + */
> +
> +#include <linux/pagewalk.h>
> +#include <asm/pgtable.h>
> +#include <asm/tlbflush.h>
> +#include <asm/bitops.h>
> +
> +struct pageattr_masks {
> +	pgprot_t set_mask;
> +	pgprot_t clear_mask;
> +};
> +
> +static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
> +{
> +	struct pageattr_masks *masks = walk->private;
> +	unsigned long new_val = val;
> +
> +	new_val &= ~(pgprot_val(masks->clear_mask));
> +	new_val |= (pgprot_val(masks->set_mask));
> +
> +	return new_val;
> +}
> +
> +static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
> +			      unsigned long next, struct mm_walk *walk)
> +{
> +	pgd_t val = READ_ONCE(*pgd);
> +
> +	if (pgd_leaf(val)) {
> +		val = __pgd(set_pageattr_masks(pgd_val(val), walk));
> +		set_pgd(pgd, val);
> +	}
> +
> +	return 0;
> +}
> +
> +static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
> +			      unsigned long next, struct mm_walk *walk)
> +{
> +	p4d_t val = READ_ONCE(*p4d);
> +
> +	if (p4d_leaf(val)) {
> +		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
> +		set_p4d(p4d, val);
> +	}
> +
> +	return 0;
> +}
> +
> +static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
> +			      unsigned long next, struct mm_walk *walk)
> +{
> +	pud_t val = READ_ONCE(*pud);
> +
> +	if (pud_leaf(val)) {
> +		val = __pud(set_pageattr_masks(pud_val(val), walk));
> +		set_pud(pud, val);
> +	}
> +
> +	return 0;
> +}
> +
> +static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
> +			      unsigned long next, struct mm_walk *walk)
> +{
> +	pmd_t val = READ_ONCE(*pmd);
> +
> +	if (pmd_leaf(val)) {
> +		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
> +		set_pmd(pmd, val);
> +	}
> +
> +	return 0;
> +}
> +
> +static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
> +			      unsigned long next, struct mm_walk *walk)
> +{
> +	pte_t val = READ_ONCE(*pte);
> +
> +	val = __pte(set_pageattr_masks(pte_val(val), walk));
> +	set_pte(pte, val);
> +
> +	return 0;
> +}
> +
> +static int pageattr_pte_hole(unsigned long addr, unsigned long next,
> +			     int depth, struct mm_walk *walk)
> +{
> +	/* Nothing to do here */
> +	return 0;
> +}
> +
> +const static struct mm_walk_ops pageattr_ops = {
> +	.pgd_entry = pageattr_pgd_entry,
> +	.p4d_entry = pageattr_p4d_entry,
> +	.pud_entry = pageattr_pud_entry,
> +	.pmd_entry = pageattr_pmd_entry,
> +	.pte_entry = pageattr_pte_entry,
> +	.pte_hole = pageattr_pte_hole,
> +};
> +
> +static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
> +			pgprot_t clear_mask)
> +{
> +	int ret;
> +	unsigned long start = addr;
> +	unsigned long end = start + PAGE_SIZE * numpages;
> +	struct pageattr_masks masks = {
> +		.set_mask = set_mask,
> +		.clear_mask = clear_mask
> +	};
> +
> +	if (!numpages)
> +		return 0;
> +
> +	down_read(&init_mm.mmap_sem);
> +	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
> +				    &masks);
> +	up_read(&init_mm.mmap_sem);

There are very few users of _novma, but I think it's correct here.

> +
> +	flush_tlb_kernel_range(start, end);
> +
> +	return ret;
> +}
> +
> +int set_memory_ro(unsigned long addr, int numpages)
> +{
> +	return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
> +			    __pgprot(_PAGE_WRITE));
> +}
> +
> +int set_memory_rw(unsigned long addr, int numpages)
> +{
> +	return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
> +			    __pgprot(0));
> +}
> +
> +int set_memory_x(unsigned long addr, int numpages)
> +{
> +	return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
> +}
> +
> +int set_memory_nx(unsigned long addr, int numpages)
> +{
> +	return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
> +}

Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>

Thanks!
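For reference, a minimal sketch of how a caller could exercise the new hooks. This is an illustration only, not part of the patch or the series: the function protect_example() and its use of a vmalloc page are assumptions made for the example, relying only on set_memory_ro()/set_memory_rw() as declared in asm/set_memory.h above.

/*
 * Hypothetical example (not from the patch): write-protect one
 * page-aligned kernel page via the new hooks, then restore it.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/set_memory.h>

static int protect_example(void)
{
	/* vmalloc memory is mapped with 4K PTEs, so only this page is changed */
	void *buf = vmalloc(PAGE_SIZE);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* Clear _PAGE_WRITE on the page backing buf. */
	ret = set_memory_ro((unsigned long)buf, 1);
	if (ret)
		goto out;

	/* buf is now read-only through this mapping; a write here would fault. */

	/* Set _PAGE_READ | _PAGE_WRITE again before freeing. */
	ret = set_memory_rw((unsigned long)buf, 1);
out:
	vfree(buf);
	return ret;
}

Note that __set_memory() only rewrites leaf entries it finds and does not split larger mappings, so callers are expected to pass regions that are already mapped at a suitable granularity (such as vmalloc ranges as above, or kernel text/rodata regions set up for this).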