Message ID | 20230531093817.665799-1-bjorn@kernel.org (mailing list archive)
---|---
State | New
Series | [v2] riscv: mm: Pre-allocate PGD entries for vmalloc/modules area
Hi Björn,

On 31/05/2023 11:38, Björn Töpel wrote:
> From: Björn Töpel <bjorn@rivosinc.com>
>
> The RISC-V port requires that kernel PGD entries be synchronized
> between MMs. This is done via the vmalloc_fault() function, which
> simply copies the PGD entries from init_mm to the faulting one.
>
> Historically, faulting in PGD entries has been a source of both
> bugs [1] and poor performance.
>
> One way to get rid of vmalloc faults is by pre-allocating the PGD
> entries. Pre-allocating the entries potentially wastes 64 * 4K (65 on
> SV39). The pre-allocation function is pulled from Jörg Rödel's x86
> work, with the addition of 3-level page tables (PMD allocations).
>
> The pmd_alloc() function needs the ptlock cache to be initialized
> (when split page locks are enabled), so the pre-allocation is done in
> a RISC-V specific pgtable_cache_init() implementation.
>
> Pre-allocate the kernel PGD entries for the vmalloc/modules area, but
> only for 64-bit platforms.
>
> Link: https://lore.kernel.org/lkml/20200508144043.13893-1-joro@8bytes.org/ # [1]
> Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
> ---
> v1->v2: Fixed broken !MMU build.
> ---
>  arch/riscv/mm/fault.c | 16 ++----------
>  arch/riscv/mm/init.c  | 58 +++++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 60 insertions(+), 14 deletions(-)
>
> diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
> index 8685f85a7474..b023fb311e28 100644
> --- a/arch/riscv/mm/fault.c
> +++ b/arch/riscv/mm/fault.c
> @@ -238,24 +238,12 @@ void handle_page_fault(struct pt_regs *regs)
>  	 * only copy the information from the master page table,
>  	 * nothing more.
>  	 */
> -	if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
> +	if ((!IS_ENABLED(CONFIG_MMU) || !IS_ENABLED(CONFIG_64BIT)) &&
> +	    unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
>  		vmalloc_fault(regs, code, addr);
>  		return;
>  	}
>
> -#ifdef CONFIG_64BIT
> -	/*
> -	 * Modules in 64bit kernels lie in their own virtual region which is not
> -	 * in the vmalloc region, but dealing with page faults in this region
> -	 * or the vmalloc region amounts to doing the same thing: checking that
> -	 * the mapping exists in init_mm.pgd and updating user page table, so
> -	 * just use vmalloc_fault.
> -	 */
> -	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
> -		vmalloc_fault(regs, code, addr);
> -		return;
> -	}
> -#endif
>  	/* Enable interrupts if they were enabled in the parent context. */
>  	if (!regs_irqs_disabled(regs))
>  		local_irq_enable();
> diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
> index 747e5b1ef02d..45ceaff5679e 100644
> --- a/arch/riscv/mm/init.c
> +++ b/arch/riscv/mm/init.c
> @@ -1363,3 +1363,61 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
>  	return vmemmap_populate_basepages(start, end, node, NULL);
>  }
>  #endif
> +
> +#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
> +/*
> + * Pre-allocates page-table pages for a specific area in the kernel
> + * page-table. Only the level which needs to be synchronized between
> + * all page-tables is allocated because the synchronization can be
> + * expensive.
> + */
> +static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
> +					       const char *area)
> +{
> +	unsigned long addr;
> +	const char *lvl;
> +
> +	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
> +		pgd_t *pgd = pgd_offset_k(addr);
> +		p4d_t *p4d;
> +		pud_t *pud;
> +		pmd_t *pmd;
> +
> +		lvl = "p4d";
> +		p4d = p4d_alloc(&init_mm, pgd, addr);
> +		if (!p4d)
> +			goto failed;
> +
> +		if (pgtable_l5_enabled)
> +			continue;
> +
> +		lvl = "pud";
> +		pud = pud_alloc(&init_mm, p4d, addr);
> +		if (!pud)
> +			goto failed;
> +
> +		if (pgtable_l4_enabled)
> +			continue;
> +
> +		lvl = "pmd";
> +		pmd = pmd_alloc(&init_mm, pud, addr);
> +		if (!pmd)
> +			goto failed;
> +	}
> +	return;
> +
> +failed:
> +	/*
> +	 * The pages have to be there now or they will be missing in
> +	 * process page-tables later.
> +	 */
> +	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
> +}
> +
> +void __init pgtable_cache_init(void)
> +{
> +	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
> +	if (IS_ENABLED(CONFIG_MODULES))
> +		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
> +}
> +#endif
>
> base-commit: ac9a78681b921877518763ba0e89202254349d1b

You can add:

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>

Thanks!

Alex
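A note on the loop bounds in preallocate_pgd_pages_range() above: the extra "addr >= start" test is an overflow guard. When the area being walked ends at the very top of the address space, ALIGN(addr + 1, PGDIR_SIZE) wraps around to 0, and without the guard the loop would never terminate. Below is a stand-alone sketch of the same walk (user-space C, not kernel code; PGDIR_SIZE and both example ranges are illustrative stand-ins, not the real RISC-V layout):

/*
 * Stand-alone sketch of the address walk in
 * preallocate_pgd_pages_range(). All constants are made up for
 * illustration.
 */
#include <stdio.h>
#include <stdint.h>

#define PGDIR_SIZE	(1ULL << 30)	/* 1 GiB per PGD entry, as on Sv39 */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int walk(uint64_t start, uint64_t end)
{
	uint64_t addr;
	int entries = 0;

	/*
	 * "addr >= start" is the overflow guard: if ALIGN() wraps past
	 * the top of the address space, addr becomes 0 and the walk
	 * stops instead of looping forever.
	 */
	for (addr = start; addr < end && addr >= start;
	     addr = ALIGN(addr + 1, PGDIR_SIZE))
		entries++;

	return entries;
}

int main(void)
{
	/* A 4 GiB area touches four PGD entries... */
	printf("%d entries\n", walk(0xffffffc800000000ULL,
				    0xffffffc900000000ULL));
	/* ...and an area ending at the very top of the address space
	 * still terminates, thanks to the wrap-around guard. */
	printf("%d entries\n", walk(0xffffffffc0000000ULL,
				    0xffffffffffffffffULL));
	return 0;
}

Each PGD entry the walk visits costs one 4 KiB next-level table, which is where the "64 * 4K" figure in the commit message comes from.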
Hello:

This patch was applied to riscv/linux.git (for-next)
by Palmer Dabbelt <palmer@rivosinc.com>:

On Wed, 31 May 2023 11:38:17 +0200 you wrote:
> From: Björn Töpel <bjorn@rivosinc.com>
>
> The RISC-V port requires that kernel PGD entries be synchronized
> between MMs. This is done via the vmalloc_fault() function, which
> simply copies the PGD entries from init_mm to the faulting one.
>
> [...]

Here is the summary with links:
  - [v2] riscv: mm: Pre-allocate PGD entries for vmalloc/modules area
    https://git.kernel.org/riscv/c/7d3332be011e

You are awesome, thank you!
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 8685f85a7474..b023fb311e28 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -238,24 +238,12 @@ void handle_page_fault(struct pt_regs *regs)
 	 * only copy the information from the master page table,
 	 * nothing more.
 	 */
-	if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
+	if ((!IS_ENABLED(CONFIG_MMU) || !IS_ENABLED(CONFIG_64BIT)) &&
+	    unlikely(addr >= VMALLOC_START && addr < VMALLOC_END)) {
 		vmalloc_fault(regs, code, addr);
 		return;
 	}

-#ifdef CONFIG_64BIT
-	/*
-	 * Modules in 64bit kernels lie in their own virtual region which is not
-	 * in the vmalloc region, but dealing with page faults in this region
-	 * or the vmalloc region amounts to doing the same thing: checking that
-	 * the mapping exists in init_mm.pgd and updating user page table, so
-	 * just use vmalloc_fault.
-	 */
-	if (unlikely(addr >= MODULES_VADDR && addr < MODULES_END)) {
-		vmalloc_fault(regs, code, addr);
-		return;
-	}
-#endif
 	/* Enable interrupts if they were enabled in the parent context. */
 	if (!regs_irqs_disabled(regs))
 		local_irq_enable();
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 747e5b1ef02d..45ceaff5679e 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1363,3 +1363,61 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return vmemmap_populate_basepages(start, end, node, NULL);
 }
 #endif
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+/*
+ * Pre-allocates page-table pages for a specific area in the kernel
+ * page-table. Only the level which needs to be synchronized between
+ * all page-tables is allocated because the synchronization can be
+ * expensive.
+ */
+static void __init preallocate_pgd_pages_range(unsigned long start, unsigned long end,
+					       const char *area)
+{
+	unsigned long addr;
+	const char *lvl;
+
+	for (addr = start; addr < end && addr >= start; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
+		pgd_t *pgd = pgd_offset_k(addr);
+		p4d_t *p4d;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		lvl = "p4d";
+		p4d = p4d_alloc(&init_mm, pgd, addr);
+		if (!p4d)
+			goto failed;
+
+		if (pgtable_l5_enabled)
+			continue;
+
+		lvl = "pud";
+		pud = pud_alloc(&init_mm, p4d, addr);
+		if (!pud)
+			goto failed;
+
+		if (pgtable_l4_enabled)
+			continue;
+
+		lvl = "pmd";
+		pmd = pmd_alloc(&init_mm, pud, addr);
+		if (!pmd)
+			goto failed;
+	}
+	return;
+
+failed:
+	/*
+	 * The pages have to be there now or they will be missing in
+	 * process page-tables later.
+	 */
+	panic("Failed to pre-allocate %s pages for %s area\n", lvl, area);
+}
+
+void __init pgtable_cache_init(void)
+{
+	preallocate_pgd_pages_range(VMALLOC_START, VMALLOC_END, "vmalloc");
+	if (IS_ENABLED(CONFIG_MODULES))
+		preallocate_pgd_pages_range(MODULES_VADDR, MODULES_END, "bpf/modules");
+}
+#endif
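For background on what the removed vmalloc_fault() path did: every task owns a clone of the kernel's top-level page table, so a PGD entry that init_mm gains after the clone was made has to be copied into the task's table on first touch. Below is a stand-alone simulation of that idea (user-space C with simplified stand-in types and sizes; not the actual riscv handler, which walks the real page tables and locates the active PGD via the satp CSR):

/*
 * Stand-alone simulation (user space, not kernel code) of the
 * PGD-copy idea behind vmalloc_fault().
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PTRS_PER_PGD 512

typedef uint64_t pgd_t;

static pgd_t init_pgd[PTRS_PER_PGD];	/* stands in for init_mm.pgd */
static pgd_t task_pgd[PTRS_PER_PGD];	/* a task's cloned top-level table */

/* The fault-time fix-up: copy the master entry into the faulting table. */
static void vmalloc_fault_sim(unsigned long index)
{
	if (init_pgd[index] == 0)
		return;	/* not mapped in init_mm either: a genuine fault */
	task_pgd[index] = init_pgd[index];
}

int main(void)
{
	memcpy(task_pgd, init_pgd, sizeof(task_pgd));	/* fork-time clone */

	init_pgd[42] = 0x1234;	/* kernel later populates a vmalloc PGD entry */
	vmalloc_fault_sim(42);	/* task faults on that area and syncs it */

	printf("task entry 42 = 0x%llx\n", (unsigned long long)task_pgd[42]);
	return 0;
}

Pre-allocating every PGD entry for the vmalloc/modules area up front, as this patch does, makes that copy step unnecessary: the cloned tables are complete from the start, and since the lower-level tables are shared with init_mm, later vmalloc mappings never touch the per-task PGD again.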