@@ -529,7 +529,7 @@ static void * __init pcpu_fc_alloc(unsig
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- memblock_free_early(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
@@ -56,8 +56,7 @@ void __init svm_swiotlb_init(void)
return;
- memblock_free_early(__pa(vstart),
- PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+ memblock_free(__pa(vstart), PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
panic("SVM: Cannot allocate SWIOTLB buffer");
}
@@ -880,7 +880,7 @@ void __init smp_detect_cpus(void)
/* Add CPUs present at boot */
__smp_rescan_cpus(info, true);
- memblock_free_early((unsigned long)info, sizeof(*info));
+ memblock_free((unsigned long)info, sizeof(*info));
}

/*
@@ -166,7 +166,7 @@ static void * __init pcpu_fc_alloc(unsig
static void __init pcpu_fc_free(void *ptr, size_t size)
{
- memblock_free_early(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
@@ -139,7 +139,7 @@ int __init sclp_early_get_core_info(stru
}
sclp_fill_core_info(info, sccb);
out:
- memblock_free_early((unsigned long)sccb, length);
+ memblock_free((unsigned long)sccb, length);
return rc;
}
@@ -441,18 +441,6 @@ static inline void *memblock_alloc_node(
MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

-static inline void memblock_free_early(phys_addr_t base,
- phys_addr_t size)
-{
- memblock_free(base, size);
-}
-
-static inline void memblock_free_early_nid(phys_addr_t base,
- phys_addr_t size, int nid)
-{
- memblock_free(base, size);
-}
-
static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
{
__memblock_free_late(base, size);
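The two wrappers removed above were plain pass-throughs to memblock_free(), so early boot code now calls it directly. A minimal sketch of the resulting pattern, assuming a hypothetical early_fw_probe() helper and a tree of this vintage where memblock_free() still takes a physical address (hence the __pa()):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Hypothetical early-boot caller, for illustration only. */
static int __init early_fw_probe(void)
{
	size_t size = 4 * PAGE_SIZE;
	void *buf = memblock_alloc(size, PAGE_SIZE);

	if (!buf)
		return -ENOMEM;

	/* ... query firmware and copy out whatever is needed ... */

	/* was: memblock_free_early(__pa(buf), size); */
	memblock_free(__pa(buf), size);
	return 0;
}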
@@ -247,7 +247,7 @@ swiotlb_init(int verbose)
return;
fail_free_mem:
- memblock_free_early(__pa(tlb), bytes);
+ memblock_free(__pa(tlb), bytes);
fail:
pr_warn("Cannot allocate buffer");
}
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(free_cpumask_var);
*/
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
- memblock_free_early(__pa(mask), cpumask_size());
+ memblock_free(__pa(mask), cpumask_size());
}
#endif
@@ -2472,7 +2472,7 @@ struct pcpu_alloc_info * __init pcpu_all
*/
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
- memblock_free_early(__pa(ai), ai->__ai_size);
+ memblock_free(__pa(ai), ai->__ai_size);
}

/**
@@ -3134,7 +3134,7 @@ out_free_areas:
out_free:
pcpu_free_alloc_info(ai);
if (areas)
- memblock_free_early(__pa(areas), areas_size);
+ memblock_free(__pa(areas), areas_size);
return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */
@@ -3256,7 +3256,7 @@ enomem:
free_fn(page_address(pages[j]), PAGE_SIZE);
rc = -ENOMEM;
out_free_ar:
- memblock_free_early(__pa(pages), pages_size);
+ memblock_free(__pa(pages), pages_size);
pcpu_free_alloc_info(ai);
return rc;
}
@@ -3286,7 +3286,7 @@ static void * __init pcpu_dfl_fc_alloc(u
static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
- memblock_free_early(__pa(ptr), size);
+ memblock_free(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
@@ -451,7 +451,7 @@ static void *sparsemap_buf_end __meminit
static inline void __meminit sparse_buffer_free(unsigned long size)
{
WARN_ON(!sparsemap_buf || size == 0);
- memblock_free_early(__pa(sparsemap_buf), size);
+ memblock_free(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
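The removed memblock_free_early_nid() likewise ignored its nid argument (it too just called memblock_free(base, size)), so NUMA-aware callers keep the node hint only on the allocation side. A small sketch under the same assumptions, with a made-up early_pernode_table() helper built around the memblock_alloc_node() shown in the header context above:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Hypothetical node-aware early allocation, for illustration only. */
static void __init early_pernode_table(int nid)
{
	size_t size = 2 * PAGE_SIZE;
	void *table = memblock_alloc_node(size, PAGE_SIZE, nid);

	if (!table)
		return;

	/* ... fill in per-node boot data ... */

	/* was: memblock_free_early_nid(__pa(table), size, nid); */
	memblock_free(__pa(table), size);
}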