Message ID | 20210313084505.16132-3-alex@ghiti.fr (mailing list archive)
---|---
State | New, archived
Series | Improve KASAN_VMALLOC support
On Sat, 13 Mar 2021 00:45:05 PST (-0800), alex@ghiti.fr wrote:
> When KASAN vmalloc region is populated, there is no userspace process and
> the page table in use is swapper_pg_dir, so there is no need to read
> SATP. Then we can use the same scheme used by kasan_populate_p*d
> functions to go through the page table, which harmonizes the code.
>
> In addition, make use of set_pgd that goes through all unused page table
> levels, contrary to p*d_populate functions, which makes this function work
> whatever the number of page table levels.
>
> Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
> Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
> ---
>  arch/riscv/mm/kasan_init.c | 59 ++++++++++++--------------------------
>  1 file changed, 18 insertions(+), 41 deletions(-)
>
> [...]

Thanks, this is on for-next.
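[For context: the "same scheme used by kasan_populate_p*d functions" mentioned above is the kernel's usual clamped page-table walk, built on pgd_addr_end(). Its generic definition (from include/linux/pgtable.h in this era; architectures may override it) looks like this:

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
})

Each step returns either the next PGDIR_SIZE-aligned boundary or 'end', whichever comes first; the '- 1' on both sides keeps the comparison correct if the boundary wraps to 0 at the top of the address space.]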
Hi Palmer,

On Tue, Mar 30, 2021 at 7:08 AM Palmer Dabbelt <palmer@dabbelt.com> wrote:
> On Sat, 13 Mar 2021 00:45:05 PST (-0800), alex@ghiti.fr wrote:
> > When KASAN vmalloc region is populated, there is no userspace process and
> > the page table in use is swapper_pg_dir, so there is no need to read
> > SATP. Then we can use the same scheme used by kasan_populate_p*d
> > functions to go through the page table, which harmonizes the code.
> >
> > [...]
> >
> > +	kasan_shallow_populate_pgd(vaddr, vend);
> >
> >  	local_flush_tlb_all();
> >  }
>
> Thanks, this is on for-next.

Your for-next does not include your fixes branch, hence they now conflict,
and for-next lacks the local_flush_tlb_all().

Gr{oetje,eeting}s,

Geert
On Tue, 30 Mar 2021 02:47:30 PDT (-0700), geert@linux-m68k.org wrote:
> Hi Palmer,
>
> On Tue, Mar 30, 2021 at 7:08 AM Palmer Dabbelt <palmer@dabbelt.com> wrote:
>> On Sat, 13 Mar 2021 00:45:05 PST (-0800), alex@ghiti.fr wrote:
>> > When KASAN vmalloc region is populated, there is no userspace process and
>> > the page table in use is swapper_pg_dir, so there is no need to read
>> > SATP. Then we can use the same scheme used by kasan_populate_p*d
>> > functions to go through the page table, which harmonizes the code.
>> >
>> > [...]
>>
>> Thanks, this is on for-next.
>
> Your for-next does not include your fixes branch, hence they now conflict,
> and for-next lacks the local_flush_tlb_all().

This came up before and I don't think we ever sorted out what the right
thing to do is. Right now I'm keeping for-next pinned at an early RC, but
fast-forwarding fixes to the latest RC every time I send a PR. I don't
have fixes merged back into for-next because I don't want those merges to
show up when I send my merge window PRs.

For this one I purposefully left out the local_flush_tlb_all() when I
pulled in this patch, and was planning on fixing it up along with any
other merge conflicts when I send along the PR.

It does all seem like a bit of a song and dance here, though, so I'm open
to suggestions as to how to run this better -- though last time I went
through that exercise it seemed like everyone had their own way of doing
it, they all had a different set of issues, and I was at least familiar
with this flavor of craziness. I was kind of tempted to convert for-next
over into a branch that only contains merges, though, which would make it
a bit easier to merge fixes in.

> Gr{oetje,eeting}s,
>
> Geert
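[For context: a minimal sketch of the "merges-only for-next" flow being discussed, with hypothetical branch names and tags — not Palmer's actual commands:

# fixes tracks the latest RC; for-next only ever takes merge commits.
git checkout fixes
git merge --ff-only v5.12-rc5   # hypothetical tag: fast-forward fixes once its patches have landed upstream
git checkout for-next
git merge --no-ff fixes         # fixes enters for-next as a single merge commit

Because for-next contains only merges, pulling fixes in is an ordinary merge rather than a rebase, and the merge-window PR shows one clean merge commit instead of scattered back-merges.]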
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 57bf4ae09361..c16178918239 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -11,18 +11,6 @@
 #include <asm/fixmap.h>
 #include <asm/pgalloc.h>
 
-static __init void *early_alloc(size_t size, int node)
-{
-	void *ptr = memblock_alloc_try_nid(size, size,
-		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
-
-	if (!ptr)
-		panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
-			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
-
-	return ptr;
-}
-
 extern pgd_t early_pg_dir[PTRS_PER_PGD];
 asmlinkage void __init kasan_early_init(void)
 {
@@ -155,38 +143,27 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
-void __init kasan_shallow_populate(void *start, void *end)
+static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 {
-	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
-	unsigned long vend = PAGE_ALIGN((unsigned long)end);
-	unsigned long pfn;
-	int index;
+	unsigned long next;
 	void *p;
-	pud_t *pud_dir, *pud_k;
-	pgd_t *pgd_dir, *pgd_k;
-	p4d_t *p4d_dir, *p4d_k;
-
-	while (vaddr < vend) {
-		index = pgd_index(vaddr);
-		pfn = csr_read(CSR_SATP) & SATP_PPN;
-		pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
-		pgd_k = init_mm.pgd + index;
-		pgd_dir = pgd_offset_k(vaddr);
-		set_pgd(pgd_dir, *pgd_k);
-
-		p4d_dir = p4d_offset(pgd_dir, vaddr);
-		p4d_k = p4d_offset(pgd_k, vaddr);
-
-		vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
-		pud_dir = pud_offset(p4d_dir, vaddr);
-		pud_k = pud_offset(p4d_k, vaddr);
-
-		if (pud_present(*pud_dir)) {
-			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
-			pud_populate(&init_mm, pud_dir, p);
+	pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+	do {
+		next = pgd_addr_end(vaddr, end);
+		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 		}
-		vaddr += PAGE_SIZE;
-	}
+	} while (pgd_k++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_shallow_populate(void *start, void *end)
+{
+	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+	unsigned long vend = PAGE_ALIGN((unsigned long)end);
+
+	kasan_shallow_populate_pgd(vaddr, vend);
 
 	local_flush_tlb_all();
 }
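[A minimal, self-contained illustration of the clamped range-walk idiom the new kasan_shallow_populate_pgd() adopts — userspace C, assuming 64-bit unsigned long and Sv39-style 1 GiB top-level entries; names are illustrative, not kernel code:

#include <stdio.h>

#define PGDIR_SHIFT	30UL			/* assumed: 1 GiB top-level entries */
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE - 1))

/* Same clamping as the kernel's pgd_addr_end(): next boundary or 'end'. */
static unsigned long pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
	return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
	unsigned long vaddr = 0x40080000UL;	/* arbitrary, unaligned start */
	unsigned long end = 0x100000000UL;
	unsigned long next;

	/* One iteration per top-level entry; the first and last steps are
	 * clamped so the walk never strays outside [vaddr, end). */
	do {
		next = pgd_addr_end(vaddr, end);
		printf("visit entry covering [%#lx, %#lx)\n", vaddr, next);
	} while (vaddr = next, vaddr != end);

	return 0;
}

The do/while shape matters: the body always runs at least once, and the "vaddr = next, vaddr != end" exit test mirrors the "(pgd_k++, vaddr = next, vaddr != end)" comma expression in the patch, which advances the pgd pointer in lockstep with the address.]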