Message ID | 20200318220634.32100-4-mike.kravetz@oracle.com (mailing list archive) |
---|---|
State | New, archived |
Series | Clean up hugetlb boot command line processing |
On Wed, Mar 18, 2020 at 3:07 PM Mike Kravetz <mike.kravetz@oracle.com> wrote:
>
> The routine hugetlb_add_hstate prints a warning if the hstate already
> exists.  This was originally done as part of kernel command line
> parsing.  If 'hugepagesz=' was specified more than once, the warning
> 	pr_warn("hugepagesz= specified twice, ignoring\n");
> would be printed.
>
[...]
>
> @@ -3231,6 +3229,12 @@ static int __init hugepagesz_setup(char *s)
>  		return 0;
>  	}
>
> +	if (size_to_hstate(size)) {
> +		pr_warn("HugeTLB: hugepagesz %s specified twice, ignoring\n",
> +			saved_s);
> +		return 0;
> +	}
> +

Not too familiar with the code but I'm a bit confused by this print.
AFAICT this prints the warning when hugepagesz= refers to a hugepage
that is already added via hugetlb_add_hstate, but there is a default
hstate added here without the user specifying hugepagesz, no?  Does
that mean the warning prints if you specify this size?  Maybe then
clarify the message like:

'HugeTLB: hugepage size already supported: xxxxx' or
'HugeTLB: hugepage size xxxx specified twice or is default size, ignoring.'

Or don't print anything if it's the default size.

> +	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
>  	return 1;
>  }
> --
> 2.24.1
On 3/23/20 5:01 PM, Mina Almasry wrote:
> On Wed, Mar 18, 2020 at 3:07 PM Mike Kravetz <mike.kravetz@oracle.com> wrote:
[...]
>> @@ -3231,6 +3229,12 @@ static int __init hugepagesz_setup(char *s)
>>  		return 0;
>>  	}
>>
>> +	if (size_to_hstate(size)) {
>> +		pr_warn("HugeTLB: hugepagesz %s specified twice, ignoring\n",
>> +			saved_s);
>> +		return 0;
>> +	}
>> +
>
> Not too familiar with the code but I'm a bit confused by this print.
> AFAICT this prints the warning when hugepagesz= refers to a hugepage
> that is already added via hugetlb_add_hstate, but there is a default
> hstate added here without the user specifying hugepagesz, no?

Correct.

> Does
> that mean the warning prints if you specify this size?

The code which adds the default hstate (in hugetlb_init) runs after this
code which is reading/processing command line options.  So, the case you
are concerned with will not happen.

Thanks for taking a look,
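For readers less familiar with the boot flow, here is a minimal sketch of the
ordering being described; it is illustrative only, not a literal excerpt from
mm/hugetlb.c:

#include <linux/init.h>
#include <linux/hugetlb.h>

/*
 * Ordering sketch: __setup() handlers run from parse_args() while the
 * kernel command line is processed, well before any initcall runs.
 * hugetlb_init() is a subsys_initcall, so the default hstate it adds
 * does not yet exist when "hugepagesz=" is parsed, and the new
 * duplicate check in hugepagesz_setup() cannot trip over it.
 */
static int __init hugepagesz_setup(char *s)
{
	return 1;			/* real parsing elided in this sketch */
}
__setup("hugepagesz=", hugepagesz_setup);	/* command line parsing time */

static int __init hugetlb_init(void)
{
	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);	/* default hstate added here */
	return 0;
}
subsys_initcall(hugetlb_init);			/* much later, initcall time */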
On Mon, Mar 23, 2020 at 5:17 PM Mike Kravetz <mike.kravetz@oracle.com> wrote:
>
> On 3/23/20 5:01 PM, Mina Almasry wrote:
> > On Wed, Mar 18, 2020 at 3:07 PM Mike Kravetz <mike.kravetz@oracle.com> wrote:
[...]
> > Not too familiar with the code but I'm a bit confused by this print.
> > AFAICT this prints the warning when hugepagesz= refers to a hugepage
> > that is already added via hugetlb_add_hstate, but there is a default
> > hstate added here without the user specifying hugepagesz, no?
>
> Correct.
>
> > Does
> > that mean the warning prints if you specify this size?
>
> The code which adds the default hstate (in hugetlb_init) runs after this
> code which is reading/processing command line options.  So, the case you
> are concerned with will not happen.
>
> Thanks for taking a look,
> --
> Mike Kravetz

Acked-By: Mina Almasry <almasrymina@google.com>
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 4aa9534a45d7..050809e6f0a9 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -441,22 +441,14 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 	clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
 }
 
-static void __init add_huge_page_size(unsigned long size)
-{
-	if (size_to_hstate(size))
-		return;
-
-	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
-}
-
 static int __init hugetlbpage_init(void)
 {
 #ifdef CONFIG_ARM64_4K_PAGES
-	add_huge_page_size(PUD_SIZE);
+	hugetlb_add_hstate(ilog2(PUD_SIZE) - PAGE_SHIFT);
 #endif
-	add_huge_page_size(CONT_PMD_SIZE);
-	add_huge_page_size(PMD_SIZE);
-	add_huge_page_size(CONT_PTE_SIZE);
+	hugetlb_add_hstate(ilog2(CONT_PMD_SIZE) - PAGE_SHIFT);
+	hugetlb_add_hstate(ilog2(PMD_SIZE) - PAGE_SHIFT);
+	hugetlb_add_hstate(ilog2(CONT_PTE_SIZE) - PAGE_SHIFT);
 
 	return 0;
 }
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 166960ba1236..f46464ba6fb4 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -584,8 +584,7 @@ static int __init add_huge_page_size(unsigned long long size)
 	if (!arch_hugetlb_valid_size(size))
 		return -EINVAL;
 
-	if (!size_to_hstate(size))
-		hugetlb_add_hstate(shift - PAGE_SHIFT);
+	hugetlb_add_hstate(shift - PAGE_SHIFT);
 	return 0;
 }
 
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index bdf89d7eb714..beaa91941db8 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -26,7 +26,7 @@ bool __init arch_hugetlb_valid_size(unsigned long long size)
 static __init int gigantic_pages_init(void)
 {
 	/* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
-	if (IS_ENABLED(CONFIG_64BIT) && !size_to_hstate(1UL << PUD_SHIFT))
+	if (IS_ENABLED(CONFIG_64BIT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
 }
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 5c29203fd460..8f619edc8f8c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -325,23 +325,12 @@ static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_inde
 }
 
 #ifdef CONFIG_HUGETLB_PAGE
-static void __init add_huge_page_size(unsigned long size)
-{
-	unsigned int order;
-
-	if (size_to_hstate(size))
-		return;
-
-	order = ilog2(size) - PAGE_SHIFT;
-	hugetlb_add_hstate(order);
-}
-
 static int __init hugetlbpage_init(void)
 {
-	add_huge_page_size(1UL << HPAGE_64K_SHIFT);
-	add_huge_page_size(1UL << HPAGE_SHIFT);
-	add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
-	add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
+	hugetlb_add_hstate(HPAGE_64K_SHIFT - PAGE_SHIFT);
+	hugetlb_add_hstate(HPAGE_SHIFT - PAGE_SHIFT);
+	hugetlb_add_hstate(HPAGE_256MB_SHIFT - PAGE_SHIFT);
+	hugetlb_add_hstate(HPAGE_2GB_SHIFT - PAGE_SHIFT);
 
 	return 0;
 }
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index dd3ed09f6c23..8a3f586e1217 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -195,7 +195,7 @@ bool __init arch_hugetlb_valid_size(unsigned long long size)
 static __init int gigantic_pages_init(void)
 {
 	/* With compaction or CMA we can allocate gigantic pages at runtime */
-	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
+	if (boot_cpu_has(X86_FEATURE_GBPAGES))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cd4ec07080fb..cc85b4f156ca 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3116,8 +3116,7 @@ static int __init hugetlb_init(void)
 		}
 
 		default_hstate_size = HPAGE_SIZE;
-		if (!size_to_hstate(default_hstate_size))
-			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
+		hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
 	}
 	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
 	if (default_hstate_max_huge_pages) {
@@ -3155,7 +3154,6 @@ void __init hugetlb_add_hstate(unsigned int order)
 	unsigned long i;
 
 	if (size_to_hstate(PAGE_SIZE << order)) {
-		pr_warn("hugepagesz= specified twice, ignoring\n");
 		return;
 	}
 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
@@ -3231,6 +3229,12 @@ static int __init hugepagesz_setup(char *s)
 		return 0;
 	}
 
+	if (size_to_hstate(size)) {
+		pr_warn("HugeTLB: hugepagesz %s specified twice, ignoring\n",
+			saved_s);
+		return 0;
+	}
+
 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
 	return 1;
 }
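Put together, the arch-independent "hugepagesz=" handler ends up with roughly
this shape after the patch.  This is a sketch reconstructed from the hunk
above, not the literal upstream function: the parsing and error reporting
ahead of the added duplicate check are not visible in the hunk context and
are assumptions here.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/hugetlb.h>

static int __init hugepagesz_setup(char *s)
{
	unsigned long long size;
	char *saved_s = s;

	size = memparse(s, &s);

	if (!arch_hugetlb_valid_size(size)) {
		/* exact error reporting is assumed, not shown in the hunk */
		return 0;
	}

	if (size_to_hstate(size)) {
		pr_warn("HugeTLB: hugepagesz %s specified twice, ignoring\n",
			saved_s);
		return 0;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	return 1;
}
__setup("hugepagesz=", hugepagesz_setup);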
The routine hugetlb_add_hstate prints a warning if the hstate already
exists.  This was originally done as part of kernel command line
parsing.  If 'hugepagesz=' was specified more than once, the warning

	pr_warn("hugepagesz= specified twice, ignoring\n");

would be printed.

Some architectures want to enable all huge page sizes.  They would
call hugetlb_add_hstate for all supported sizes.  However, this was
done after command line processing and as a result hstates could have
already been created for some sizes.  To make sure no warnings were
printed, there would often be code like:

	if (!size_to_hstate(size))
		hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);

The only time we want to print the warning is as the result of command
line processing.  So, remove the warning from hugetlb_add_hstate and
add it to the single arch-independent routine that processes
"hugepagesz=".  After this, calls to size_to_hstate() in arch-specific
code can be removed and hugetlb_add_hstate can be called without
worrying about warning messages.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
---
 arch/arm64/mm/hugetlbpage.c   | 16 ++++------------
 arch/powerpc/mm/hugetlbpage.c |  3 +--
 arch/riscv/mm/hugetlbpage.c   |  2 +-
 arch/sparc/mm/init_64.c       | 19 ++++---------------
 arch/x86/mm/hugetlbpage.c     |  2 +-
 mm/hugetlb.c                  | 10 +++++++---
 6 files changed, 18 insertions(+), 34 deletions(-)
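As an illustration (the sizes below are only an example), a command line such as

	hugepagesz=1G hugepages=2 hugepagesz=1G hugepages=4

would now trigger the single warning "HugeTLB: hugepagesz 1G specified twice,
ignoring" from hugepagesz_setup(), while architectures that register the same
sizes from their init code no longer need the size_to_hstate() guard to stay
silent.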