Message ID | 1549979990-6642-1-git-send-email-rppt@linux.ibm.com (mailing list archive) |
---|---|
State | Superseded |
Headers | show |
Series | parisc: use memblock_alloc() instead of custom get_memblock() | expand |
On Tue, Feb 12, 2019 at 03:59:50PM +0200, Mike Rapoport wrote: > -static void * __init get_memblock(unsigned long size) > -{ > - static phys_addr_t search_addr __initdata; > - phys_addr_t phys; > - > - if (!search_addr) > - search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); > - search_addr = ALIGN(search_addr, size); > - while (!memblock_is_region_memory(search_addr, size) || > - memblock_is_region_reserved(search_addr, size)) { > - search_addr += size; > - } > - phys = search_addr; This implies to me that the allocation will be 'size' aligned. > if (!pmd) { > - pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); > + pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER, > + SMP_CACHE_BYTES); So why would this only need to be cacheline aligned? It's pretty common for hardware to require that pgd/pud/pmd/pte tables be naturally aligned. > @@ -700,7 +683,10 @@ static void __init pagetable_init(void) > } > #endif > > - empty_zero_page = get_memblock(PAGE_SIZE); > + empty_zero_page = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES); ... and surely the zero page also needs to be page aligned, by definition.
On Tue, Feb 12, 2019 at 06:14:18AM -0800, Matthew Wilcox wrote: > On Tue, Feb 12, 2019 at 03:59:50PM +0200, Mike Rapoport wrote: > > -static void * __init get_memblock(unsigned long size) > > -{ > > - static phys_addr_t search_addr __initdata; > > - phys_addr_t phys; > > - > > - if (!search_addr) > > - search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); > > - search_addr = ALIGN(search_addr, size); > > - while (!memblock_is_region_memory(search_addr, size) || > > - memblock_is_region_reserved(search_addr, size)) { > > - search_addr += size; > > - } > > - phys = search_addr; > > This implies to me that the allocation will be 'size' aligned. > > > if (!pmd) { > > - pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); > > + pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER, > > + SMP_CACHE_BYTES); > > So why would this only need to be cacheline aligned? It's pretty common > for hardware to require that pgd/pud/pmd/pte tables be naturally aligned. > > > @@ -700,7 +683,10 @@ static void __init pagetable_init(void) > > } > > #endif > > > > - empty_zero_page = get_memblock(PAGE_SIZE); > > + empty_zero_page = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES); > > ... and surely the zero page also needs to be page aligned, by definition. Right, I've completely missed the alignment. Will fix.
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 059187a..38b928e 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -79,36 +79,6 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; int npmem_ranges __read_mostly; -/* - * get_memblock() allocates pages via memblock. - * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it - * doesn't allocate from bottom to top which is needed because we only created - * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code. - */ -static void * __init get_memblock(unsigned long size) -{ - static phys_addr_t search_addr __initdata; - phys_addr_t phys; - - if (!search_addr) - search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); - search_addr = ALIGN(search_addr, size); - while (!memblock_is_region_memory(search_addr, size) || - memblock_is_region_reserved(search_addr, size)) { - search_addr += size; - } - phys = search_addr; - - if (phys) - memblock_reserve(phys, size); - else - panic("get_memblock() failed.\n"); - - memset(__va(phys), 0, size); - - return __va(phys); -} - #ifdef CONFIG_64BIT #define MAX_MEM (~0UL) #else /* !CONFIG_64BIT */ @@ -321,6 +291,13 @@ static void __init setup_bootmem(void) max_pfn = start_pfn + npages; } + /* + * We can't use memblock top-down allocations because we only + * created the initial mapping up to KERNEL_INITIAL_SIZE in + * the assembly bootup code. + */ + memblock_set_bottom_up(true); + /* IOMMU is always used to access "high mem" on those boxes * that can support enough mem that a PCI device couldn't * directly DMA to any physical addresses. 
@@ -442,7 +419,10 @@ static void __init map_pages(unsigned long start_vaddr, */ if (!pmd) { - pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); + pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER, + SMP_CACHE_BYTES); + if (!pmd) + panic("pmd allocation failed.\n"); pmd = (pmd_t *) __pa(pmd); } @@ -461,7 +441,10 @@ static void __init map_pages(unsigned long start_vaddr, pg_table = (pte_t *)pmd_address(*pmd); if (!pg_table) { - pg_table = (pte_t *) get_memblock(PAGE_SIZE); + pg_table = memblock_alloc(PAGE_SIZE, + SMP_CACHE_BYTES); + if (!pg_table) + panic("page table allocation failed\n"); pg_table = (pte_t *) __pa(pg_table); } @@ -700,7 +683,10 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = get_memblock(PAGE_SIZE); + empty_zero_page = memblock_alloc(PAGE_SIZE, SMP_CACHE_BYTES); + if (!empty_zero_page) + panic("zero page allocation failed.\n"); + } static void __init gateway_init(void)
The get_memblock() function implements a custom bottom-up memblock allocator. Setting 'memblock_bottom_up = true' before any memblock allocation is done allows replacing get_memblock() calls with memblock_alloc(). Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> --- arch/parisc/mm/init.c | 52 +++++++++++++++++++-------------------------------- 1 file changed, 19 insertions(+), 33 deletions(-)