Message ID: 20201203062949.5484-5-rppt@kernel.org (mailing list archive)
State: New
Series: mm: introduce memfd_secret system call to create "secret" memory areas
On Thu, 3 Dec 2020 08:29:43 +0200 Mike Rapoport <rppt@kernel.org> wrote:

> From: Mike Rapoport <rppt@linux.ibm.com>
>
> On arm64, set_direct_map_*() functions may return 0 without actually
> changing the linear map. This behaviour can be controlled using kernel
> parameters, so we need a way to determine at runtime whether calls to
> set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
> have any effect.
>
> Extend set_memory API with can_set_direct_map() function that allows
> checking if calling set_direct_map_*() will actually change the page
> table, replace several occurrences of open coded checks in arm64 with
> the new function and provide a generic stub for architectures that
> always modify page tables upon calls to set_direct_map APIs.
>
> ...
>
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -22,6 +22,7 @@
>  #include <linux/io.h>
>  #include <linux/mm.h>
>  #include <linux/vmalloc.h>
> +#include <linux/set_memory.h>
>
>  #include <asm/barrier.h>
>  #include <asm/cputype.h>
> @@ -477,7 +478,7 @@ static void __init map_mem(pgd_t *pgdp)
>  	int flags = 0;
>  	u64 i;
>
> -	if (rodata_full || debug_pagealloc_enabled())
> +	if (can_set_direct_map())
>  		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

Changes in -next turned this into

	if (can_set_direct_map() || crash_mem_map)
On Thu, Dec 03, 2020 at 03:36:10PM -0800, Andrew Morton wrote:
> On Thu, 3 Dec 2020 08:29:43 +0200 Mike Rapoport <rppt@kernel.org> wrote:
>
> > From: Mike Rapoport <rppt@linux.ibm.com>
> >
> > On arm64, set_direct_map_*() functions may return 0 without actually
> > changing the linear map. This behaviour can be controlled using kernel
> > parameters, so we need a way to determine at runtime whether calls to
> > set_direct_map_invalid_noflush() and set_direct_map_default_noflush()
> > have any effect.
> >
> > Extend set_memory API with can_set_direct_map() function that allows
> > checking if calling set_direct_map_*() will actually change the page
> > table, replace several occurrences of open coded checks in arm64 with
> > the new function and provide a generic stub for architectures that
> > always modify page tables upon calls to set_direct_map APIs.
> >
> > ...
> >
> > --- a/arch/arm64/mm/mmu.c
> > +++ b/arch/arm64/mm/mmu.c
> > @@ -22,6 +22,7 @@
> >  #include <linux/io.h>
> >  #include <linux/mm.h>
> >  #include <linux/vmalloc.h>
> > +#include <linux/set_memory.h>
> >
> >  #include <asm/barrier.h>
> >  #include <asm/cputype.h>
> > @@ -477,7 +478,7 @@ static void __init map_mem(pgd_t *pgdp)
> >  	int flags = 0;
> >  	u64 i;
> >
> > -	if (rodata_full || debug_pagealloc_enabled())
> > +	if (can_set_direct_map())
> >  		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
> Changes in -next turned this into
>
> 	if (can_set_direct_map() || crash_mem_map)

Thanks for updating!
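For readers following along: the point of the new query is that code which depends on the linear map actually being changed can now detect, at runtime, the configurations where set_direct_map_*() silently does nothing (on arm64, when both rodata_full and debug_pagealloc are off). A minimal, hypothetical sketch of such a guard; example_feature_init() is made up for illustration and is not part of this patch:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/set_memory.h>

	static int __init example_feature_init(void)
	{
		/*
		 * If the direct map cannot be modified, set_direct_map_*()
		 * would return 0 without changing anything, so refuse to
		 * enable a feature that relies on unmapping its pages.
		 */
		if (!can_set_direct_map())
			return -ENOSYS;

		return 0;
	}
	late_initcall(example_feature_init);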
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index ff9cbb631212..4306136ef329 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -4,5 +4,4 @@ generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += set_memory.h
 generic-y += user.h
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index d3598419a284..b1bdf83a73db 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -136,12 +136,6 @@ static __always_inline void __flush_icache_all(void)
 	dsb(ish);
 }
 
-int set_memory_valid(unsigned long addr, int numpages, int enable);
-
-int set_direct_map_invalid_noflush(struct page *page, int numpages);
-int set_direct_map_default_noflush(struct page *page, int numpages);
-bool kernel_page_present(struct page *page);
-
 #include <asm-generic/cacheflush.h>
 
 #endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
new file mode 100644
index 000000000000..ecb6b0f449ab
--- /dev/null
+++ b/arch/arm64/include/asm/set_memory.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_ARM64_SET_MEMORY_H
+#define _ASM_ARM64_SET_MEMORY_H
+
+#include <asm-generic/set_memory.h>
+
+bool can_set_direct_map(void);
+#define can_set_direct_map can_set_direct_map
+
+int set_memory_valid(unsigned long addr, int numpages, int enable);
+
+int set_direct_map_invalid_noflush(struct page *page, int numpages);
+int set_direct_map_default_noflush(struct page *page, int numpages);
+bool kernel_page_present(struct page *page);
+
+#endif /* _ASM_ARM64_SET_MEMORY_H */
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
index a0b144cfaea7..0cbc50c4fa5a 100644
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/kexec.h>
 #include <linux/page-flags.h>
+#include <linux/set_memory.h>
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 86be6d1a78ab..aa5ec08cb902 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -22,6 +22,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/set_memory.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -477,7 +478,7 @@ static void __init map_mem(pgd_t *pgdp)
 	int flags = 0;
 	u64 i;
 
-	if (rodata_full || debug_pagealloc_enabled())
+	if (can_set_direct_map())
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1453,8 +1454,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	 * KFENCE requires linear map to be mapped at page granularity, so that
 	 * it is possible to protect/unprotect single pages in the KFENCE pool.
 	 */
-	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index b53ef37bf95a..d505172265b0 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -19,6 +19,11 @@ struct page_change_data {
 
 bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
 
+bool can_set_direct_map(void)
+{
+	return rodata_full || debug_pagealloc_enabled();
+}
+
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 {
 	struct page_change_data *cdata = data;
@@ -156,7 +161,7 @@ int set_direct_map_invalid_noflush(struct page *page, int numpages)
 	};
 	unsigned long size = PAGE_SIZE * numpages;
 
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -172,7 +177,7 @@ int set_direct_map_default_noflush(struct page *page, int numpages)
 	};
 	unsigned long size = PAGE_SIZE * numpages;
 
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return 0;
 
 	return apply_to_page_range(&init_mm,
@@ -183,7 +188,7 @@ int set_direct_map_default_noflush(struct page *page, int numpages)
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return;
 
 	set_memory_valid((unsigned long)page_address(page), numpages, enable);
@@ -208,7 +213,7 @@ bool kernel_page_present(struct page *page)
 	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);
 
-	if (!debug_pagealloc_enabled() && !rodata_full)
+	if (!can_set_direct_map())
 		return true;
 
 	pgdp = pgd_offset_k(addr);
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index c650f82db813..7b4b6626032d 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -28,7 +28,19 @@ static inline bool kernel_page_present(struct page *page)
 {
 	return true;
 }
+#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
+/*
+ * Some architectures, e.g. ARM64 can disable direct map modifications at
+ * boot time. Let them overrive this query.
+ */
+#ifndef can_set_direct_map
+static inline bool can_set_direct_map(void)
+{
+	return true;
+}
+#define can_set_direct_map can_set_direct_map
 #endif
+#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 
 #ifndef set_mce_nospec
 static inline int set_mce_nospec(unsigned long pfn, bool unmap)
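For context, the override plumbing in the generic header uses the usual "define a macro with the same name" convention: an architecture that declares its own can_set_direct_map() also defines the macro, so the static inline fallback in <linux/set_memory.h> is compiled out. A rough sketch of what another architecture would add; the arch name, file paths and the helper foo_linear_map_uses_pages() are hypothetical:

	/* arch/foo/include/asm/set_memory.h (hypothetical) */
	#include <asm-generic/set_memory.h>

	bool can_set_direct_map(void);
	#define can_set_direct_map can_set_direct_map

	/* arch/foo/mm/pageattr.c (hypothetical) */
	bool can_set_direct_map(void)
	{
		/* true only when the linear map is mapped with base pages */
		return foo_linear_map_uses_pages();
	}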