@@ -202,8 +202,10 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
/* altmap lookups only work at section boundaries */
altmap = to_vmem_altmap(SECTION_ALIGN_DOWN(start));
-
- p = __vmemmap_alloc_block_buf(page_size, node, altmap);
+ if (altmap)
+ p = dev_pagemap_alloc_block_buf(page_size, altmap);
+ else
+ p = vmemmap_alloc_block_buf(page_size, node);
if (!p)
return -ENOMEM;
@@ -1371,7 +1371,10 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
if (pmd_none(*pmd)) {
void *p;
- p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
+ if (altmap)
+ p = dev_pagemap_alloc_block_buf(PMD_SIZE, altmap);
+ else
+ p = vmemmap_alloc_block_buf(PMD_SIZE, node);
if (p) {
pte_t entry;
@@ -2546,13 +2546,9 @@ pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
void *vmemmap_alloc_block(unsigned long size, int node);
struct vmem_altmap;
-void *__vmemmap_alloc_block_buf(unsigned long size, int node,
+void *vmemmap_alloc_block_buf(unsigned long size, int node);
+void *dev_pagemap_alloc_block_buf(unsigned long size,
struct vmem_altmap *altmap);
-static inline void *vmemmap_alloc_block_buf(unsigned long size, int node)
-{
- return __vmemmap_alloc_block_buf(size, node, NULL);
-}
-
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
int node);
@@ -74,7 +74,7 @@ void * __meminit vmemmap_alloc_block(unsigned long size, int node)
}
/* need to make sure size is all the same during early stage */
-static void * __meminit alloc_block_buf(unsigned long size, int node)
+void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node)
{
void *ptr;
@@ -129,7 +129,7 @@ static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
return pfn + nr_align;
}
-static void * __meminit altmap_alloc_block_buf(unsigned long size,
+void * __meminit dev_pagemap_alloc_block_buf(unsigned long size,
struct vmem_altmap *altmap)
{
unsigned long pfn, nr_pfns;
@@ -153,15 +153,6 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size,
return ptr;
}
-/* need to make sure size is all the same during early stage */
-void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
- struct vmem_altmap *altmap)
-{
- if (altmap)
- return altmap_alloc_block_buf(size, altmap);
- return alloc_block_buf(size, node);
-}
-
void __meminit vmemmap_verify(pte_t *pte, int node,
unsigned long start, unsigned long end)
{
@@ -178,7 +169,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
pte_t *pte = pte_offset_kernel(pmd, addr);
if (pte_none(*pte)) {
pte_t entry;
- void *p = alloc_block_buf(PAGE_SIZE, node);
+ void *p = vmemmap_alloc_block_buf(PAGE_SIZE, node);
if (!p)
return NULL;
entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
No functional changes, just untangling the call chain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/powerpc/mm/init_64.c |  6 ++++--
 arch/x86/mm/init_64.c     |  5 ++++-
 include/linux/mm.h        |  8 ++------
 mm/sparse-vmemmap.c       | 15 +++------------
 4 files changed, 13 insertions(+), 21 deletions(-)
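
For illustration only, not part of the patch: with the split helpers, a vmemmap
populate path is expected to pick the allocator explicitly, roughly along these
lines.  The helper names match the declarations added to include/linux/mm.h
above; the function and its surrounding logic are a simplified sketch of the
caller pattern, not the exact arch code.

	#include <linux/mm.h>

	static int __meminit populate_one_block(unsigned long size, int node,
						struct vmem_altmap *altmap)
	{
		void *p;

		/* Use the device-provided altmap space only when one exists. */
		if (altmap)
			p = dev_pagemap_alloc_block_buf(size, altmap);
		else
			p = vmemmap_alloc_block_buf(size, node);
		if (!p)
			return -ENOMEM;

		/* ... map 'p' into the vmemmap at the caller's address ... */
		return 0;
	}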