Message ID | 20210126044510.2491820-6-npiggin@gmail.com
State      | New, archived
Series     | huge vmalloc mappings
Reviewed-by: Ding Tianhong <dingtianhong@huawei.com>

On 2021/1/26 12:45, Nicholas Piggin wrote:
> This changes the awkward approach where architectures provide init
> functions to determine which levels they can provide large mappings for,
> to one where the arch is queried for each call.
>
> This removes code and indirection, and allows constant-folding of dead
> code for unsupported levels.
>
> This also adds a prot argument to the arch query. This is unused
> currently but could help with some architectures (e.g., some powerpc
> processors can't map uncacheable memory with large pages).
>
> Cc: linuxppc-dev@lists.ozlabs.org
> Cc: Catalin Marinas <catalin.marinas@arm.com>
> Cc: Will Deacon <will@kernel.org>
> Cc: linux-arm-kernel@lists.infradead.org
> Cc: Thomas Gleixner <tglx@linutronix.de>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: Borislav Petkov <bp@alien8.de>
> Cc: x86@kernel.org
> Cc: "H. Peter Anvin" <hpa@zytor.com>
> Acked-by: Catalin Marinas <catalin.marinas@arm.com> [arm64]
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>  arch/arm64/include/asm/vmalloc.h         |  8 ++
>  arch/arm64/mm/mmu.c                      | 10 +--
>  arch/powerpc/include/asm/vmalloc.h       |  8 ++
>  arch/powerpc/mm/book3s64/radix_pgtable.c |  8 +-
>  arch/x86/include/asm/vmalloc.h           |  7 ++
>  arch/x86/mm/ioremap.c                    | 12 +--
>  include/linux/io.h                       |  9 ---
>  include/linux/vmalloc.h                  |  6 ++
>  init/main.c                              |  1 -
>  mm/ioremap.c                             | 94 ++++++++++--------------
>  10 files changed, 85 insertions(+), 78 deletions(-)
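The per-call query pattern described in the changelog can be illustrated with a small standalone model. The sketch below is editorial and userspace-only: the *_STUB macros, pgprot_stub_t and the *_stub functions are made-up stand-ins, not kernel code, and only mirror the gate at the top of vmap_try_huge_pmd() in the patch, where a PMD-sized leaf is used only if both the caller's max_page_shift and the architecture query (which now sees the prot) allow it.

/* Userspace model of the per-call huge-vmap query; all names are
 * illustrative stand-ins for the kernel's real types and hooks. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT_STUB 12
#define PMD_SHIFT_STUB  21
#define PUD_SHIFT_STUB  30

typedef unsigned long pgprot_stub_t;

/* Stand-in for arch_vmap_pmd_supported(): a real architecture would test
 * CPU features here and, with the new prot argument, could also refuse
 * e.g. uncacheable mappings at this level. */
static bool arch_vmap_pmd_supported_stub(pgprot_stub_t prot)
{
        (void)prot;
        return true;
}

/* Mirrors the gate at the top of vmap_try_huge_pmd(): a PMD leaf is used
 * only if max_page_shift allows it, the arch query allows it, and the
 * range is exactly PMD sized. */
static bool can_use_pmd_leaf(unsigned int max_page_shift, pgprot_stub_t prot,
                             unsigned long size)
{
        if (max_page_shift < PMD_SHIFT_STUB)
                return false;
        if (!arch_vmap_pmd_supported_stub(prot))
                return false;
        return size == (1UL << PMD_SHIFT_STUB);
}

int main(void)
{
        unsigned long two_mb = 1UL << PMD_SHIFT_STUB;

        printf("2MB range, huge mappings allowed: %d\n",
               can_use_pmd_leaf(PUD_SHIFT_STUB, 0, two_mb));
        printf("2MB range, capped at base pages:  %d\n",
               can_use_pmd_leaf(PAGE_SHIFT_STUB, 0, two_mb));
        return 0;
}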
Hi Nicholas,

I love your patch! Yet something to improve:

[auto build test ERROR on powerpc/next]
[also build test ERROR on arm64/for-next/core v5.11-rc5 next-20210125]
[cannot apply to hnaz-linux-mm/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url:    https://github.com/0day-ci/linux/commits/Nicholas-Piggin/huge-vmalloc-mappings/20210126-143141
base:   https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: x86_64-randconfig-a002-20210126 (attached as .config)
compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project 925ae8c790c7e354f12ec14a6cac6aa49fc75b29)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install x86_64 cross compiling tool for clang build
        # apt-get install binutils-x86-64-linux-gnu
        # https://github.com/0day-ci/linux/commit/e43d3c665212ea34b790ab8d150bbde9d42e35b8
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Nicholas-Piggin/huge-vmalloc-mappings/20210126-143141
        git checkout e43d3c665212ea34b790ab8d150bbde9d42e35b8
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>):

>> mm/debug_vm_pgtable.c:221:7: error: implicit declaration of function 'arch_ioremap_pmd_supported' [-Werror,-Wimplicit-function-declaration]
           if (!arch_ioremap_pmd_supported())
                ^
   mm/debug_vm_pgtable.c:221:7: note: did you mean 'arch_vmap_pmd_supported'?
   arch/x86/include/asm/vmalloc.h:10:6: note: 'arch_vmap_pmd_supported' declared here
   bool arch_vmap_pmd_supported(pgprot_t prot);
        ^
>> mm/debug_vm_pgtable.c:343:7: error: implicit declaration of function 'arch_ioremap_pud_supported' [-Werror,-Wimplicit-function-declaration]
           if (!arch_ioremap_pud_supported())
                ^
   mm/debug_vm_pgtable.c:343:7: note: did you mean 'arch_vmap_pud_supported'?
   arch/x86/include/asm/vmalloc.h:9:6: note: 'arch_vmap_pud_supported' declared here
   bool arch_vmap_pud_supported(pgprot_t prot);
        ^
   2 errors generated.

vim +/arch_ioremap_pmd_supported +221 mm/debug_vm_pgtable.c

a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  215
85a144632dcc71 Aneesh Kumar K.V  2020-10-15  216  #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  217  static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  218  {
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  219          pmd_t pmd;
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  220
85a144632dcc71 Aneesh Kumar K.V  2020-10-15 @221          if (!arch_ioremap_pmd_supported())
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  222                  return;
6315df41afccf1 Anshuman Khandual 2020-08-06  223
6315df41afccf1 Anshuman Khandual 2020-08-06  224          pr_debug("Validating PMD huge\n");
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  225          /*
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  226           * X86 defined pmd_set_huge() verifies that the given
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  227           * PMD is not a populated non-leaf entry.
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  228           */
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  229          WRITE_ONCE(*pmdp, __pmd(0));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  230          WARN_ON(!pmd_set_huge(pmdp, __pfn_to_phys(pfn), prot));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  231          WARN_ON(!pmd_clear_huge(pmdp));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  232          pmd = READ_ONCE(*pmdp);
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  233          WARN_ON(!pmd_none(pmd));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06  234  }
85a144632dcc71 Aneesh Kumar K.V  2020-10-15  235  #else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
85a144632dcc71 Aneesh Kumar K.V  2020-10-15  236  static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot) { }
85a144632dcc71 Aneesh Kumar K.V  2020-10-15  237  #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
Hi Nicholas,
I love your patch! Yet something to improve:
[auto build test ERROR on powerpc/next]
[also build test ERROR on arm64/for-next/core v5.11-rc5 next-20210125]
[cannot apply to hnaz-linux-mm/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]
url: https://github.com/0day-ci/linux/commits/Nicholas-Piggin/huge-vmalloc-mappings/20210126-143141
base: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git next
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# https://github.com/0day-ci/linux/commit/e43d3c665212ea34b790ab8d150bbde9d42e35b8
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review Nicholas-Piggin/huge-vmalloc-mappings/20210126-143141
git checkout e43d3c665212ea34b790ab8d150bbde9d42e35b8
# save the attached .config to linux build tree
make W=1 ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All errors (new ones prefixed by >>):
   mm/debug_vm_pgtable.c: In function 'pmd_huge_tests':
>> mm/debug_vm_pgtable.c:221:7: error: implicit declaration of function 'arch_ioremap_pmd_supported'; did you mean 'arch_vmap_pmd_supported'? [-Werror=implicit-function-declaration]
     221 |  if (!arch_ioremap_pmd_supported())
         |       ^~~~~~~~~~~~~~~~~~~~~~~~~~
         |       arch_vmap_pmd_supported
   mm/debug_vm_pgtable.c: In function 'pud_huge_tests':
>> mm/debug_vm_pgtable.c:343:7: error: implicit declaration of function 'arch_ioremap_pud_supported'; did you mean 'arch_vmap_pud_supported'? [-Werror=implicit-function-declaration]
     343 |  if (!arch_ioremap_pud_supported())
         |       ^~~~~~~~~~~~~~~~~~~~~~~~~~
         |       arch_vmap_pud_supported
   cc1: some warnings being treated as errors
vim +343 mm/debug_vm_pgtable.c
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 337
85a144632dcc71 Aneesh Kumar K.V 2020-10-15 338 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 339 static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 340 {
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 341 pud_t pud;
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 342
85a144632dcc71 Aneesh Kumar K.V 2020-10-15 @343 if (!arch_ioremap_pud_supported())
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 344 return;
6315df41afccf1 Anshuman Khandual 2020-08-06 345
6315df41afccf1 Anshuman Khandual 2020-08-06 346 pr_debug("Validating PUD huge\n");
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 347 /*
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 348 * X86 defined pud_set_huge() verifies that the given
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 349 * PUD is not a populated non-leaf entry.
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 350 */
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 351 WRITE_ONCE(*pudp, __pud(0));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 352 WARN_ON(!pud_set_huge(pudp, __pfn_to_phys(pfn), prot));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 353 WARN_ON(!pud_clear_huge(pudp));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 354 pud = READ_ONCE(*pudp);
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 355 WARN_ON(!pud_none(pud));
a5c3b9ffb0f404 Anshuman Khandual 2020-08-06 356 }
85a144632dcc71 Aneesh Kumar K.V 2020-10-15 357 #else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
85a144632dcc71 Aneesh Kumar K.V 2020-10-15 358 static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot) { }
85a144632dcc71 Aneesh Kumar K.V 2020-10-15 359 #endif /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
85a144632dcc71 Aneesh Kumar K.V 2020-10-15 360
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
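Both robot reports flag the same root cause: mm/debug_vm_pgtable.c still calls the old arch_ioremap_*_supported() interfaces that this patch removes. The hunk below sketches the kind of fixup the compiler notes point at, switching the two call sites to the renamed hooks and passing the functions' existing prot parameter; it is an illustrative sketch only, not the fixup actually folded into the series.

--- a/mm/debug_vm_pgtable.c
+++ b/mm/debug_vm_pgtable.c
@@ static void __init pmd_huge_tests(pmd_t *pmdp, unsigned long pfn, pgprot_t prot)
-	if (!arch_ioremap_pmd_supported())
+	if (!arch_vmap_pmd_supported(prot))
 		return;
@@ static void __init pud_huge_tests(pud_t *pudp, unsigned long pfn, pgprot_t prot)
-	if (!arch_ioremap_pud_supported())
+	if (!arch_vmap_pud_supported(prot))
 		return;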
diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h
index 2ca708ab9b20..597b40405319 100644
--- a/arch/arm64/include/asm/vmalloc.h
+++ b/arch/arm64/include/asm/vmalloc.h
@@ -1,4 +1,12 @@
 #ifndef _ASM_ARM64_VMALLOC_H
 #define _ASM_ARM64_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_ARM64_VMALLOC_H */
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index ae0c3d023824..1613d290cbd1 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1313,12 +1313,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
 	return dt_virt;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/*
 	 * Only 4k granule supports level 1 block mappings.
@@ -1328,9 +1328,9 @@ int __init arch_ioremap_pud_supported(void)
 		 !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
-	/* See arch_ioremap_pud_supported() */
+	/* See arch_vmap_pud_supported() */
 	return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS);
 }
 
diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h
index b992dfaaa161..105abb73f075 100644
--- a/arch/powerpc/include/asm/vmalloc.h
+++ b/arch/powerpc/include/asm/vmalloc.h
@@ -1,4 +1,12 @@
 #ifndef _ASM_POWERPC_VMALLOC_H
 #define _ASM_POWERPC_VMALLOC_H
 
+#include <asm/page.h>
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_POWERPC_VMALLOC_H */
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 98f0b243c1ab..743807fc210f 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -1082,13 +1082,13 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
 	set_pte_at(mm, addr, ptep, pte);
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 	/* HPT does not cope with large pages in the vmalloc area */
 	return radix_enabled();
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return radix_enabled();
 }
 
@@ -1182,7 +1182,7 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 	return 1;
 }
 
-int __init arch_ioremap_p4d_supported(void)
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h
index 29837740b520..094ea2b565f3 100644
--- a/arch/x86/include/asm/vmalloc.h
+++ b/arch/x86/include/asm/vmalloc.h
@@ -1,6 +1,13 @@
 #ifndef _ASM_X86_VMALLOC_H
 #define _ASM_X86_VMALLOC_H
 
+#include <asm/page.h>
 #include <asm/pgtable_areas.h>
 
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot);
+bool arch_vmap_pud_supported(pgprot_t prot);
+bool arch_vmap_pmd_supported(pgprot_t prot);
+#endif
+
 #endif /* _ASM_X86_VMALLOC_H */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 9e5ccc56f8e0..fbaf0c447986 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -481,24 +481,26 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int __init arch_ioremap_p4d_supported(void)
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+bool arch_vmap_p4d_supported(pgprot_t prot)
 {
-	return 0;
+	return false;
 }
 
-int __init arch_ioremap_pud_supported(void)
+bool arch_vmap_pud_supported(pgprot_t prot)
 {
 #ifdef CONFIG_X86_64
 	return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
-	return 0;
+	return false;
 #endif
 }
 
-int __init arch_ioremap_pmd_supported(void)
+bool arch_vmap_pmd_supported(pgprot_t prot)
 {
 	return boot_cpu_has(X86_FEATURE_PSE);
 }
+#endif
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/include/linux/io.h b/include/linux/io.h
index 8394c56babc2..f1effd4d7a3c 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -31,15 +31,6 @@ static inline int ioremap_page_range(unsigned long addr, unsigned long end,
 }
 #endif
 
-#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-void __init ioremap_huge_init(void);
-int arch_ioremap_p4d_supported(void);
-int arch_ioremap_pud_supported(void);
-int arch_ioremap_pmd_supported(void);
-#else
-static inline void ioremap_huge_init(void) { }
-#endif
-
 /*
  * Managed iomap interface
  */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 80c0181c411d..00bd62bd701e 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -83,6 +83,12 @@ struct vmap_area {
 	};
 };
 
+#ifndef CONFIG_HAVE_ARCH_HUGE_VMAP
+static inline bool arch_vmap_p4d_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pud_supported(pgprot_t prot) { return false; }
+static inline bool arch_vmap_pmd_supported(pgprot_t prot) { return false; }
+#endif
+
 /*
  * Highlevel APIs for driver use
  */
diff --git a/init/main.c b/init/main.c
index c68d784376ca..bf9389e5b2e4 100644
--- a/init/main.c
+++ b/init/main.c
@@ -834,7 +834,6 @@ static void __init mm_init(void)
 	pgtable_init();
 	debug_objects_mem_init();
 	vmalloc_init();
-	ioremap_huge_init();
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
diff --git a/mm/ioremap.c b/mm/ioremap.c
index 3f4d36f9745a..3264d0203785 100644
--- a/mm/ioremap.c
+++ b/mm/ioremap.c
@@ -16,49 +16,16 @@
 #include "pgalloc-track.h"
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-static int __read_mostly ioremap_p4d_capable;
-static int __read_mostly ioremap_pud_capable;
-static int __read_mostly ioremap_pmd_capable;
-static int __read_mostly ioremap_huge_disabled;
+static bool __ro_after_init iomap_max_page_shift = PAGE_SHIFT;
 
 static int __init set_nohugeiomap(char *str)
 {
-	ioremap_huge_disabled = 1;
+	iomap_max_page_shift = P4D_SHIFT;
 	return 0;
 }
 early_param("nohugeiomap", set_nohugeiomap);
-
-void __init ioremap_huge_init(void)
-{
-	if (!ioremap_huge_disabled) {
-		if (arch_ioremap_p4d_supported())
-			ioremap_p4d_capable = 1;
-		if (arch_ioremap_pud_supported())
-			ioremap_pud_capable = 1;
-		if (arch_ioremap_pmd_supported())
-			ioremap_pmd_capable = 1;
-	}
-}
-
-static inline int ioremap_p4d_enabled(void)
-{
-	return ioremap_p4d_capable;
-}
-
-static inline int ioremap_pud_enabled(void)
-{
-	return ioremap_pud_capable;
-}
-
-static inline int ioremap_pmd_enabled(void)
-{
-	return ioremap_pmd_capable;
-}
-
-#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
-static inline int ioremap_p4d_enabled(void) { return 0; }
-static inline int ioremap_pud_enabled(void) { return 0; }
-static inline int ioremap_pmd_enabled(void) { return 0; }
+#else /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+static const bool iomap_max_page_shift = PAGE_SHIFT;
 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
 
 static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
@@ -82,9 +49,13 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 }
 
 static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_pmd_enabled())
+	if (max_page_shift < PMD_SHIFT)
+		return 0;
+
+	if (!arch_vmap_pmd_supported(prot))
 		return 0;
 
 	if ((end - addr) != PMD_SIZE)
@@ -104,7 +75,7 @@ static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -115,7 +86,8 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 		next = pmd_addr_end(addr, end);
 
-		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot)) {
+		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
+					max_page_shift)) {
 			*mask |= PGTBL_PMD_MODIFIED;
 			continue;
 		}
@@ -127,9 +99,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 }
 
 static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_pud_enabled())
+	if (max_page_shift < PUD_SHIFT)
+		return 0;
+
+	if (!arch_vmap_pud_supported(prot))
 		return 0;
 
 	if ((end - addr) != PUD_SIZE)
@@ -149,7 +125,7 @@ static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 
 static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -160,21 +136,27 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
 		next = pud_addr_end(addr, end);
 
-		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot)) {
+		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
+					max_page_shift)) {
 			*mask |= PGTBL_PUD_MODIFIED;
 			continue;
 		}
 
-		if (vmap_pmd_range(pud, addr, next, phys_addr, prot, mask))
+		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
+					max_page_shift, mask))
 			return -ENOMEM;
 	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
-	if (!ioremap_p4d_enabled())
+	if (max_page_shift < P4D_SHIFT)
+		return 0;
+
+	if (!arch_vmap_p4d_supported(prot))
 		return 0;
 
 	if ((end - addr) != P4D_SIZE)
@@ -194,7 +176,7 @@ static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 
 static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 			phys_addr_t phys_addr, pgprot_t prot,
-			pgtbl_mod_mask *mask)
+			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 {
 	p4d_t *p4d;
 	unsigned long next;
@@ -205,19 +187,22 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 	do {
 		next = p4d_addr_end(addr, end);
 
-		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) {
+		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
+					max_page_shift)) {
 			*mask |= PGTBL_P4D_MODIFIED;
 			continue;
 		}
 
-		if (vmap_pud_range(p4d, addr, next, phys_addr, prot, mask))
+		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
+					max_page_shift, mask))
 			return -ENOMEM;
 	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
 	return 0;
 }
 
 static int vmap_range(unsigned long addr, unsigned long end,
-			phys_addr_t phys_addr, pgprot_t prot)
+			phys_addr_t phys_addr, pgprot_t prot,
+			unsigned int max_page_shift)
 {
 	pgd_t *pgd;
 	unsigned long start;
@@ -232,7 +217,8 @@ static int vmap_range(unsigned long addr, unsigned long end,
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot, &mask);
+		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
+					max_page_shift, &mask);
 		if (err)
 			break;
 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
@@ -248,7 +234,7 @@ static int vmap_range(unsigned long addr, unsigned long end,
 
 int ioremap_page_range(unsigned long addr,
 		       unsigned long end, phys_addr_t phys_addr, pgprot_t prot)
 {
-	return vmap_range(addr, end, phys_addr, prot);
+	return vmap_range(addr, end, phys_addr, prot, iomap_max_page_shift);
 }
 
 #ifdef CONFIG_GENERIC_IOREMAP
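The changelog's claim that the new scheme "allows constant-folding of dead code for unsupported levels" can be seen with a small standalone experiment: when the support query is a static inline returning a compile-time constant false (as the !CONFIG_HAVE_ARCH_HUGE_VMAP stubs in include/linux/vmalloc.h are), an optimizing compiler can discard the huge-mapping branch entirely. The example below is a userspace model with made-up names, not kernel code.

/* Userspace model of a constant-false support stub; all names are
 * illustrative. With optimization enabled, the call to
 * map_huge_leaf_stub() is eliminated because the query folds to false. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pgprot_stub_t;

/* Mirrors the !CONFIG_HAVE_ARCH_HUGE_VMAP stub: a static inline that
 * returns a compile-time constant false. */
static inline bool arch_vmap_pmd_supported_stub(pgprot_stub_t prot)
{
        (void)prot;
        return false;
}

/* Hypothetical huge-leaf mapper; never reached when the stub above
 * returns constant false. */
static int map_huge_leaf_stub(unsigned long addr)
{
        printf("mapping %#lx with a huge leaf\n", addr);
        return 1;
}

static int map_range_stub(unsigned long addr, pgprot_stub_t prot)
{
        /* This whole branch is dead code once the query folds to false. */
        if (arch_vmap_pmd_supported_stub(prot) && map_huge_leaf_stub(addr))
                return 0;
        printf("mapping %#lx with base pages\n", addr);
        return 0;
}

int main(void)
{
        return map_range_stub(0xffe00000UL, 0);
}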