| Message ID | 1549984572-10867-1-git-send-email-rppt@linux.ibm.com (mailing list archive) |
| --- | --- |
| State | Accepted, archived |
| Series | [v2] parisc: use memblock_alloc() instead of custom get_memblock() |
Any comments on this?

On Tue, Feb 12, 2019 at 05:16:12PM +0200, Mike Rapoport wrote:
> The get_memblock() function implements custom bottom-up memblock allocator.
> Setting 'memblock_bottom_up = true' before any memblock allocation is done
> allows replacing get_memblock() calls with memblock_alloc().
>
> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
> ---
> v2: fix allocation alignment
>
>  arch/parisc/mm/init.c | 52 +++++++++++++++++++--------------------------------
>  1 file changed, 19 insertions(+), 33 deletions(-)
>
> [...]
On 21.02.19 10:07, Mike Rapoport wrote:
> On Tue, Feb 12, 2019 at 05:16:12PM +0200, Mike Rapoport wrote:
>> The get_memblock() function implements custom bottom-up memblock allocator.
>> Setting 'memblock_bottom_up = true' before any memblock allocation is done
>> allows replacing get_memblock() calls with memblock_alloc().
>>
>> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>

Acked-by: Helge Deller <deller@gmx.de>
Tested-by: Helge Deller <deller@gmx.de>

Thanks!
Shall I push the patch upstream with the parisc tree?

Helge

>> [...]
On Thu, Feb 21, 2019 at 11:00:05AM +0100, Helge Deller wrote:
> On 21.02.19 10:07, Mike Rapoport wrote:
> > On Tue, Feb 12, 2019 at 05:16:12PM +0200, Mike Rapoport wrote:
> >> The get_memblock() function implements custom bottom-up memblock allocator.
> >> Setting 'memblock_bottom_up = true' before any memblock allocation is done
> >> allows replacing get_memblock() calls with memblock_alloc().
> >>
> >> Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
>
> Acked-by: Helge Deller <deller@gmx.de>
> Tested-by: Helge Deller <deller@gmx.de>
>
> Thanks!
> Shall I push the patch upstream with the parisc tree?

Yes, please.

> Helge
>
> [...]
```diff
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 059187a..d0b1662 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -79,36 +79,6 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly;
 physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly;
 int npmem_ranges __read_mostly;
 
-/*
- * get_memblock() allocates pages via memblock.
- * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it
- * doesn't allocate from bottom to top which is needed because we only created
- * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code.
- */
-static void * __init get_memblock(unsigned long size)
-{
-        static phys_addr_t search_addr __initdata;
-        phys_addr_t phys;
-
-        if (!search_addr)
-                search_addr = PAGE_ALIGN(__pa((unsigned long) &_end));
-        search_addr = ALIGN(search_addr, size);
-        while (!memblock_is_region_memory(search_addr, size) ||
-                        memblock_is_region_reserved(search_addr, size)) {
-                search_addr += size;
-        }
-        phys = search_addr;
-
-        if (phys)
-                memblock_reserve(phys, size);
-        else
-                panic("get_memblock() failed.\n");
-
-        memset(__va(phys), 0, size);
-
-        return __va(phys);
-}
-
 #ifdef CONFIG_64BIT
 #define MAX_MEM (~0UL)
 #else /* !CONFIG_64BIT */
@@ -321,6 +291,13 @@ static void __init setup_bootmem(void)
                max_pfn = start_pfn + npages;
        }
 
+       /*
+        * We can't use memblock top-down allocations because we only
+        * created the initial mapping up to KERNEL_INITIAL_SIZE in
+        * the assembly bootup code.
+        */
+       memblock_set_bottom_up(true);
+
        /* IOMMU is always used to access "high mem" on those boxes
         * that can support enough mem that a PCI device couldn't
         * directly DMA to any physical addresses.
@@ -442,7 +419,10 @@ static void __init map_pages(unsigned long start_vaddr,
                 */
 
                if (!pmd) {
-                        pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER);
+                        pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
+                                             PAGE_SIZE << PMD_ORDER);
+                        if (!pmd)
+                                panic("pmd allocation failed.\n");
                         pmd = (pmd_t *) __pa(pmd);
                }
 
@@ -461,7 +441,10 @@ static void __init map_pages(unsigned long start_vaddr,
 
                pg_table = (pte_t *)pmd_address(*pmd);
                if (!pg_table) {
-                        pg_table = (pte_t *) get_memblock(PAGE_SIZE);
+                        pg_table = memblock_alloc(PAGE_SIZE,
+                                                  PAGE_SIZE);
+                        if (!pg_table)
+                                panic("page table allocation failed\n");
                         pg_table = (pte_t *) __pa(pg_table);
                }
 
@@ -700,7 +683,10 @@ static void __init pagetable_init(void)
        }
 #endif
 
-        empty_zero_page = get_memblock(PAGE_SIZE);
+        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+        if (!empty_zero_page)
+                panic("zero page allocation failed.\n");
+
 }
 
 static void __init gateway_init(void)
```
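Every call-site conversion above follows the same pattern, and it relies on memblock_alloc() providing what get_memblock() used to do by hand: the returned buffer is already zeroed (so the explicit memset() is gone), it comes back as a virtual address just like the old `return __va(phys)` (so the callers' `__pa()` conversions stay unchanged), and failure is reported as a NULL return that each caller turns into a panic(). A minimal sketch of the pattern; the `alloc_pgtable_page()` wrapper is illustrative only and not part of the patch, which open-codes this at each call site:

```c
#include <linux/memblock.h>

/* Illustrative wrapper only -- the patch open-codes this at each call site. */
static void * __init alloc_pgtable_page(unsigned long size)
{
        /*
         * memblock_alloc(size, align) returns zeroed, virtually mapped
         * memory, or NULL on failure.  Passing the size as the alignment
         * keeps the table naturally aligned, mirroring the old
         * ALIGN(search_addr, size) step in get_memblock().
         */
        void *table = memblock_alloc(size, size);

        if (!table)
                panic("early page table allocation failed\n");

        return table;
}
```

That align == size argument is presumably what the "v2: fix allocation alignment" changelog note refers to.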
The get_memblock() function implements a custom bottom-up memblock allocator.
Setting 'memblock_bottom_up = true' before any memblock allocation is done
allows replacing get_memblock() calls with memblock_alloc().

Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
---
v2: fix allocation alignment

 arch/parisc/mm/init.c | 52 +++++++++++++++++++--------------------------------
 1 file changed, 19 insertions(+), 33 deletions(-)
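As the description says, the bottom-up switch has to happen before any memblock allocation: only the region up to KERNEL_INITIAL_SIZE has been mapped by the assembly bootup code at that point, so a top-down allocation (memblock's default) could land in memory that is not yet mapped. A compressed sketch of the resulting flow; the function name is illustrative, and the real setup_bootmem() in arch/parisc/mm/init.c does much more:

```c
#include <linux/memblock.h>

static void __init setup_bootmem_sketch(void)
{
        /* ... physical memory ranges have already been registered with memblock ... */

        /*
         * memblock allocates top-down by default.  Switch it to bottom-up
         * before anything is allocated, so that allocations come from low
         * addresses just above the kernel image -- memory the assembly
         * bootup code has already mapped.
         */
        memblock_set_bottom_up(true);

        /*
         * Every later memblock_alloc() -- the PMDs and PTE pages in
         * map_pages(), empty_zero_page in pagetable_init() -- is then
         * satisfied from already-mapped memory, which is what the removed
         * get_memblock() guaranteed by searching upwards from _end.
         */
}
```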