| Field | Value |
|---|---|
| Message ID | 20210714193542.21857-7-joao.m.martins@oracle.com (mailing list archive) |
| State | New, archived |
| Series | mm, sparse-vmemmap: Introduce compound pagemaps |

On Wed, Jul 14, 2021 at 12:36 PM Joao Martins <joao.m.martins@oracle.com> wrote:
>
> In preparation for describing a memmap with compound pages, move the
> actual pte population logic into a separate function
> vmemmap_populate_address() and have vmemmap_populate_basepages() walk
> through all base pages it needs to populate.
>
> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
> ---
>  mm/sparse-vmemmap.c | 44 ++++++++++++++++++++++++++------------------
>  1 file changed, 26 insertions(+), 18 deletions(-)
>
> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
> index 80d3ba30d345..76f4158f6301 100644
> --- a/mm/sparse-vmemmap.c
> +++ b/mm/sparse-vmemmap.c
> @@ -570,33 +570,41 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
>         return pgd;
>  }
>
> -int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
> -                                        int node, struct vmem_altmap *altmap)
> +static int __meminit vmemmap_populate_address(unsigned long addr, int node,
> +                                              struct vmem_altmap *altmap)
>  {
> -       unsigned long addr = start;
>         pgd_t *pgd;
>         p4d_t *p4d;
>         pud_t *pud;
>         pmd_t *pmd;
>         pte_t *pte;
>
> +       pgd = vmemmap_pgd_populate(addr, node);
> +       if (!pgd)
> +               return -ENOMEM;
> +       p4d = vmemmap_p4d_populate(pgd, addr, node);
> +       if (!p4d)
> +               return -ENOMEM;
> +       pud = vmemmap_pud_populate(p4d, addr, node);
> +       if (!pud)
> +               return -ENOMEM;
> +       pmd = vmemmap_pmd_populate(pud, addr, node);
> +       if (!pmd)
> +               return -ENOMEM;
> +       pte = vmemmap_pte_populate(pmd, addr, node, altmap);
> +       if (!pte)
> +               return -ENOMEM;
> +       vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

Missing a return here:

mm/sparse-vmemmap.c:598:1: error: control reaches end of non-void
function [-Werror=return-type]

Yes, it's fixed up in a later patch, but might as well not leave the
bisect breakage lying around, and the kbuild robot would gripe about
this eventually as well.

> +}
> +
> +int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
> +                                        int node, struct vmem_altmap *altmap)
> +{
> +       unsigned long addr = start;
> +
>         for (; addr < end; addr += PAGE_SIZE) {
> -               pgd = vmemmap_pgd_populate(addr, node);
> -               if (!pgd)
> -                       return -ENOMEM;
> -               p4d = vmemmap_p4d_populate(pgd, addr, node);
> -               if (!p4d)
> -                       return -ENOMEM;
> -               pud = vmemmap_pud_populate(p4d, addr, node);
> -               if (!pud)
> -                       return -ENOMEM;
> -               pmd = vmemmap_pmd_populate(pud, addr, node);
> -               if (!pmd)
> -                       return -ENOMEM;
> -               pte = vmemmap_pte_populate(pmd, addr, node, altmap);
> -               if (!pte)
> +               if (vmemmap_populate_address(addr, node, altmap))
>                         return -ENOMEM;

I'd prefer:

                rc = vmemmap_populate_address(addr, node, altmap);
                if (rc)
                        return rc;

...in case future refactoring adds different error codes to pass up.

> -               vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
>         }
>
>         return 0;
> --
> 2.17.1
>
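
(For reference, the only change being asked for at this point is an explicit return at the end of the new helper. A sketch of how vmemmap_populate_address() would presumably look with that one-line fix folded in, everything else taken from the hunk quoted above:)

```c
static int __meminit vmemmap_populate_address(unsigned long addr, int node,
                                              struct vmem_altmap *altmap)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /* Populate each level of the page-table hierarchy for this address. */
        pgd = vmemmap_pgd_populate(addr, node);
        if (!pgd)
                return -ENOMEM;
        p4d = vmemmap_p4d_populate(pgd, addr, node);
        if (!p4d)
                return -ENOMEM;
        pud = vmemmap_pud_populate(p4d, addr, node);
        if (!pud)
                return -ENOMEM;
        pmd = vmemmap_pmd_populate(pud, addr, node);
        if (!pmd)
                return -ENOMEM;
        pte = vmemmap_pte_populate(pmd, addr, node, altmap);
        if (!pte)
                return -ENOMEM;
        vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

        /* vmemmap_verify() returns void, so success must be reported explicitly. */
        return 0;
}
```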

On 7/28/21 7:04 AM, Dan Williams wrote:
> On Wed, Jul 14, 2021 at 12:36 PM Joao Martins <joao.m.martins@oracle.com> wrote:
>>
>> In preparation for describing a memmap with compound pages, move the
>> actual pte population logic into a separate function
>> vmemmap_populate_address() and have vmemmap_populate_basepages() walk
>> through all base pages it needs to populate.
>>
>> Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
>> ---
>>  mm/sparse-vmemmap.c | 44 ++++++++++++++++++++++++++------------------
>>  1 file changed, 26 insertions(+), 18 deletions(-)
>>
>> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
>> index 80d3ba30d345..76f4158f6301 100644
>> --- a/mm/sparse-vmemmap.c
>> +++ b/mm/sparse-vmemmap.c
>> @@ -570,33 +570,41 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
>>         return pgd;
>>  }
>>
>> -int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
>> -                                        int node, struct vmem_altmap *altmap)
>> +static int __meminit vmemmap_populate_address(unsigned long addr, int node,
>> +                                              struct vmem_altmap *altmap)
>>  {
>> -       unsigned long addr = start;
>>         pgd_t *pgd;
>>         p4d_t *p4d;
>>         pud_t *pud;
>>         pmd_t *pmd;
>>         pte_t *pte;
>>
>> +       pgd = vmemmap_pgd_populate(addr, node);
>> +       if (!pgd)
>> +               return -ENOMEM;
>> +       p4d = vmemmap_p4d_populate(pgd, addr, node);
>> +       if (!p4d)
>> +               return -ENOMEM;
>> +       pud = vmemmap_pud_populate(p4d, addr, node);
>> +       if (!pud)
>> +               return -ENOMEM;
>> +       pmd = vmemmap_pmd_populate(pud, addr, node);
>> +       if (!pmd)
>> +               return -ENOMEM;
>> +       pte = vmemmap_pte_populate(pmd, addr, node, altmap);
>> +       if (!pte)
>> +               return -ENOMEM;
>> +       vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
>
> Missing a return here:
>
> mm/sparse-vmemmap.c:598:1: error: control reaches end of non-void
> function [-Werror=return-type]
>
> Yes, it's fixed up in a later patch

That fixup definitely needs to be moved here.

> , but might as well not leave the
> bisect breakage lying around, and the kbuild robot would gripe about
> this eventually as well.
>

Yeap. Fixed, thanks for noticing.

>
>> +}
>> +
>> +int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
>> +                                        int node, struct vmem_altmap *altmap)
>> +{
>> +       unsigned long addr = start;
>> +
>>         for (; addr < end; addr += PAGE_SIZE) {
>> -               pgd = vmemmap_pgd_populate(addr, node);
>> -               if (!pgd)
>> -                       return -ENOMEM;
>> -               p4d = vmemmap_p4d_populate(pgd, addr, node);
>> -               if (!p4d)
>> -                       return -ENOMEM;
>> -               pud = vmemmap_pud_populate(p4d, addr, node);
>> -               if (!pud)
>> -                       return -ENOMEM;
>> -               pmd = vmemmap_pmd_populate(pud, addr, node);
>> -               if (!pmd)
>> -                       return -ENOMEM;
>> -               pte = vmemmap_pte_populate(pmd, addr, node, altmap);
>> -               if (!pte)
>> +               if (vmemmap_populate_address(addr, node, altmap))
>>                         return -ENOMEM;
>
> I'd prefer:
>
>                 rc = vmemmap_populate_address(addr, node, altmap);
>                 if (rc)
>                         return rc;
>
> ...in case future refactoring adds different error codes to pass up.
>

Fixed.
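
(Folding in the error-propagation style agreed on above, the reworked walker would presumably end up along these lines; the local variable name rc comes from Dan's snippet, and everything else mirrors the hunk under discussion:)

```c
int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
                                         int node, struct vmem_altmap *altmap)
{
        unsigned long addr = start;
        int rc;

        /* Populate one base (PAGE_SIZE) vmemmap page at a time. */
        for (; addr < end; addr += PAGE_SIZE) {
                /* Pass the helper's error code up instead of assuming -ENOMEM. */
                rc = vmemmap_populate_address(addr, node, altmap);
                if (rc)
                        return rc;
        }

        return 0;
}
```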

diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 80d3ba30d345..76f4158f6301 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -570,33 +570,41 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
        return pgd;
 }

-int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
-                                        int node, struct vmem_altmap *altmap)
+static int __meminit vmemmap_populate_address(unsigned long addr, int node,
+                                              struct vmem_altmap *altmap)
 {
-       unsigned long addr = start;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

+       pgd = vmemmap_pgd_populate(addr, node);
+       if (!pgd)
+               return -ENOMEM;
+       p4d = vmemmap_p4d_populate(pgd, addr, node);
+       if (!p4d)
+               return -ENOMEM;
+       pud = vmemmap_pud_populate(p4d, addr, node);
+       if (!pud)
+               return -ENOMEM;
+       pmd = vmemmap_pmd_populate(pud, addr, node);
+       if (!pmd)
+               return -ENOMEM;
+       pte = vmemmap_pte_populate(pmd, addr, node, altmap);
+       if (!pte)
+               return -ENOMEM;
+       vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
+}
+
+int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
+                                        int node, struct vmem_altmap *altmap)
+{
+       unsigned long addr = start;
+
        for (; addr < end; addr += PAGE_SIZE) {
-               pgd = vmemmap_pgd_populate(addr, node);
-               if (!pgd)
-                       return -ENOMEM;
-               p4d = vmemmap_p4d_populate(pgd, addr, node);
-               if (!p4d)
-                       return -ENOMEM;
-               pud = vmemmap_pud_populate(p4d, addr, node);
-               if (!pud)
-                       return -ENOMEM;
-               pmd = vmemmap_pmd_populate(pud, addr, node);
-               if (!pmd)
-                       return -ENOMEM;
-               pte = vmemmap_pte_populate(pmd, addr, node, altmap);
-               if (!pte)
+               if (vmemmap_populate_address(addr, node, altmap))
                        return -ENOMEM;
-               vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
        }

        return 0;

In preparation for describing a memmap with compound pages, move the
actual pte population logic into a separate function
vmemmap_populate_address() and have vmemmap_populate_basepages() walk
through all base pages it needs to populate.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 mm/sparse-vmemmap.c | 44 ++++++++++++++++++++++++++------------------
 1 file changed, 26 insertions(+), 18 deletions(-)
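
(Usage note: vmemmap_populate_basepages() is the generic backend that per-architecture vmemmap_populate() implementations call when the memmap is mapped with base pages rather than huge pages. The wrapper below is a hypothetical minimal architecture hook, shown only to illustrate where the refactored path gets exercised; it is not part of this patch:)

```c
/*
 * Hypothetical arch hook: map the struct page array for [start, end)
 * with base pages by deferring entirely to the generic helper.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
{
        return vmemmap_populate_basepages(start, end, node, altmap);
}
```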