@@ -640,6 +640,8 @@ void __init early_init_fdt_scan_reserved_mem(void)
if (!initial_boot_params)
return;
+ memblock_memsize_disable_tracking();
+
/* Process header /memreserve/ fields */
for (n = 0; ; n++) {
fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
@@ -652,6 +654,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
fdt_scan_reserved_mem();
fdt_reserve_elfcorehdr();
fdt_init_reserved_mem();
+ memblock_memsize_enable_tracking();
}
/**
@@ -1289,12 +1292,15 @@ void __init early_init_dt_scan_nodes(void)
if (rc)
pr_warn("No chosen node found, continuing without\n");
+ memblock_memsize_disable_tracking();
+
/* Setup memory, calling early_init_dt_add_memory_arch */
early_init_dt_scan_memory();
/* Handle linux,usable-memory-range property */
early_init_dt_check_for_usable_mem_range();
+ memblock_memsize_enable_tracking();
memblock_memsize_detect_hole();
}
@@ -611,6 +611,9 @@ extern void memblock_memsize_record(const char *name, phys_addr_t base,
extern void memblock_memsize_detect_hole(void);
extern void memblock_memsize_set_name(const char *name);
extern void memblock_memsize_unset_name(void);
+extern void memblock_memsize_enable_tracking(void);
+extern void memblock_memsize_disable_tracking(void);
+extern void memblock_memsize_mod_kernel_size(long size);
#else
static inline void memblock_memsize_record(const char *name, phys_addr_t base,
phys_addr_t size, bool nomap,
@@ -618,6 +621,9 @@ static inline void memblock_memsize_record(const char *name, phys_addr_t base,
static inline void memblock_memsize_detect_hole(void) { }
static inline void memblock_memsize_set_name(const char *name) { }
static inline void memblock_memsize_unset_name(void) { }
+static inline void memblock_memsize_enable_tracking(void) { }
+static inline void memblock_memsize_disable_tracking(void) { }
+static inline void memblock_memsize_mod_kernel_size(long size) { }
#endif
#endif /* _LINUX_MEMBLOCK_H */
@@ -230,10 +230,11 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
{
int ret;
+ memblock_memsize_disable_tracking();
ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
"reserved", res_cma);
if (ret)
- return ret;
+ goto out;
/* Architecture specific contiguous memory fixup. */
dma_contiguous_early_fixup(cma_get_base(*res_cma),
@@ -241,7 +242,9 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
memblock_memsize_record("dma_cma", cma_get_base(*res_cma),
cma_get_size(*res_cma), false, true);
- return 0;
+out:
+ memblock_memsize_enable_tracking();
+ return ret;
}
/**
@@ -1965,6 +1965,23 @@ struct memsize_rgn_struct {
#define MAX_MEMSIZE_RGN 64
static struct memsize_rgn_struct memsize_rgn[MAX_MEMSIZE_RGN] __initdata_memblock;
static int memsize_rgn_count __initdata_memblock;
+static long kernel_init_size;
+static bool do_memsize __initdata_memblock = true;
+
+void __init memblock_memsize_enable_tracking(void)
+{
+ do_memsize = true;
+}
+
+void __init memblock_memsize_disable_tracking(void)
+{
+ do_memsize = false;
+}
+
+void memblock_memsize_mod_kernel_size(long size)
+{
+ kernel_init_size += size;
+}
static inline struct memsize_rgn_struct * __init_memblock memsize_get_new_rgn(void)
{
@@ -2170,6 +2187,12 @@ static void __init_memblock memblock_memsize_record_add(struct memblock_type *ty
base, size, false, false);
else if (type == &memblock.memory)
memblock_memsize_free(base, size);
+ } else if (do_memsize) {
+ if (type == &memblock.reserved) {
+ memblock_dbg("%s: kernel %ld %+ld\n", __func__,
+ kernel_init_size, (long)size);
+ kernel_init_size += size;
+ }
}
}
@@ -2182,6 +2205,12 @@ static void __init_memblock memblock_memsize_record_remove(struct memblock_type
else if (type == &memblock.memory)
memblock_memsize_record(memblock_memsize_name,
base, size, true, false);
+ } else if (do_memsize) {
+ if (type == &memblock.reserved) {
+ memblock_dbg("%s: kernel %ld %+ld\n", __func__,
+ kernel_init_size, -(long)size);
+ kernel_init_size -= size;
+ }
}
}
#endif /* MEMBLOCK_MEMSIZE */
@@ -2289,6 +2318,19 @@ static unsigned long __init __free_memory_core(phys_addr_t start,
unsigned long end_pfn = min_t(unsigned long,
PFN_DOWN(end), max_low_pfn);
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+ unsigned long start_align_up = PFN_ALIGN(start);
+ unsigned long end_align_down = PFN_PHYS(end_pfn);
+
+ if (start_pfn >= end_pfn) {
+ memblock_memsize_mod_kernel_size(end - start);
+ } else {
+ if (start_align_up > start)
+ memblock_memsize_mod_kernel_size(start_align_up - start);
+ if (end_pfn != max_low_pfn && end_align_down < end)
+ memblock_memsize_mod_kernel_size(end - end_align_down);
+ }
+#endif
if (start_pfn >= end_pfn)
return 0;
@@ -2374,6 +2416,8 @@ void __init memblock_free_all(void)
pages = free_low_memory_core_early();
totalram_pages_add(pages);
+
+ memblock_memsize_disable_tracking();
}
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
@@ -2441,6 +2485,8 @@ static int memblock_memsize_show(struct seq_file *m, void *private)
}
seq_printf(m, "\n");
+ seq_printf(m, " .kernel : %7lu KB\n",
+ DIV_ROUND_UP((unsigned long)kernel_init_size, SZ_1K));
seq_printf(m, " .unusable : %7lu KB\n",
DIV_ROUND_UP(reserved, SZ_1K));
seq_printf(m, " .reusable : %7lu KB\n",
@@ -8230,8 +8230,15 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
free_reserved_page(page);
}
- if (pages && s)
+ if (pages && s) {
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
+ if (!strcmp(s, "initrd") || !strcmp(s, "unused kernel")) {
+ long size;
+
+ size = -1 * (long)(pages << PAGE_SHIFT);
+ memblock_memsize_mod_kernel_size(size);
+ }
+ }
return pages;
}
Some memory regions are already tracked by the previous patches, but
there are many memblock allocations and frees during boot. This patch
tracks the memblock size used for the common kernel. To do this,
memsize tracking is disabled around some memory handling logic such as
early params, the device tree scan, and the default CMA reservation.
For a precise kernel size, this patch counts memory that is not
actually freed to the buddy allocator at boot time (the unaligned head
and tail of each freed range), and subtracts memory freed from the
ramdisk and the unused init sections.

Additionally, this patch does one important thing: it prevents
memblock_add_range() and memblock_remove_range() from updating memsize
once free pages have already been released to the buddy allocator.

This is an example; the kernel line is newly added by this patch.

 .kernel   :  135137 KB
 .unusable :  788073 KB
 .reusable :  294912 KB

Signed-off-by: Jaewon Kim <jaewon31.kim@samsung.com>
---
 drivers/of/fdt.c         |  6 ++++++
 include/linux/memblock.h |  6 ++++++
 kernel/dma/contiguous.c  |  7 ++++--
 mm/memblock.c            | 46 ++++++++++++++++++++++++++++++++++++++++
 mm/page_alloc.c          |  9 +++++++-
 5 files changed, 71 insertions(+), 3 deletions(-)
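
For reference, below is a minimal sketch (not part of this patch) of how
another early reservation could use the new helpers, following the same
pattern as the dma_contiguous_reserve_area() hunk above: tracking is
paused so the region is not folded into the generic .kernel accounting,
and the region is recorded under its own name instead. The function and
region names are made up for illustration, and the fifth argument of
memblock_memsize_record() is assumed to be the "reusable" flag, matching
the existing "dma_cma" call.

#include <linux/memblock.h>

/* Illustrative only: an early, named reservation using the new hooks. */
static int __init example_reserve_fw_region(phys_addr_t base, phys_addr_t size)
{
	int ret;

	/* Pause kernel-size accounting so this is not counted as ".kernel". */
	memblock_memsize_disable_tracking();

	ret = memblock_reserve(base, size);
	if (!ret)
		/* Record under its own name: nomap = false, reusable = false. */
		memblock_memsize_record("example_fw", base, size, false, false);

	memblock_memsize_enable_tracking();

	return ret;
}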