@@ -502,6 +502,8 @@ static void __init fdt_reserve_elfcorehdr(void)
}
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+ memblock_memsize_record("elfcorehdr", elfcorehdr_addr, elfcorehdr_size,
+ false, false);
pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
elfcorehdr_size >> 10, elfcorehdr_addr);
@@ -531,6 +533,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
if (!size)
break;
memblock_reserve(base, size);
+ memblock_memsize_record("memreserve", base, size, false, false);
}
fdt_init_reserved_mem();
@@ -438,9 +438,10 @@ void __init fdt_init_reserved_mem(void)
struct reserved_mem *rmem = &reserved_mem[i];
unsigned long node = rmem->fdt_node;
int err = 0;
- bool nomap;
+ bool nomap, reusable;
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+ reusable = of_get_flat_dt_prop(node, "reusable", NULL) != NULL;
if (rmem->size == 0)
err = __reserved_mem_alloc_size(node, rmem->name,
@@ -457,14 +458,16 @@ void __init fdt_init_reserved_mem(void)
rmem->size);
} else {
phys_addr_t end = rmem->base + rmem->size - 1;
- bool reusable =
- (of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
&rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
nomap ? "nomap" : "map",
reusable ? "reusable" : "non-reusable",
rmem->name ? rmem->name : "unknown");
+
+ memblock_memsize_record(rmem->name, rmem->base,
+ rmem->size, nomap,
+ reusable);
}
}
}
@@ -613,5 +613,14 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+extern void memblock_memsize_record(const char *name, phys_addr_t base,
+ phys_addr_t size, bool nomap,
+ bool reusable);
+#else
+static inline void memblock_memsize_record(const char *name, phys_addr_t base,
+ phys_addr_t size, bool nomap,
+ bool reusable) { }
+#endif
#endif /* _LINUX_MEMBLOCK_H */
@@ -286,6 +286,8 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
dma_contiguous_early_fixup(cma_get_base(*res_cma),
cma_get_size(*res_cma));
+ memblock_memsize_record("dma_cma", cma_get_base(*res_cma),
+ cma_get_size(*res_cma), false, true);
return 0;
}
@@ -476,6 +476,22 @@ config HAVE_GUP_FAST
depends on MMU
bool
+config MAX_MEMBLOCK_MEMSIZE
+ int "Maximum number of tracking regions"
+ depends on MEMBLOCK_MEMSIZE
+ default 100
+ range 0 200
+ help
+	  This sets the maximum number of tracked reserved memory regions.
+	  If it is set to 0, nothing will be recorded.
+
+config MEMBLOCK_MEMSIZE
+ bool "memblock based reserved memory profiling"
+ default n
+ help
+	  Introduce a debugfs node, memblock/memsize, to easily inspect
+	  reserved memory regions.
+
# Don't discard allocated memory used to track "memory" and "reserved" memblocks
# after early boot, so it can still be used to test for validity of memory.
# Also, memblocks are updated with memory hot(un)plug.
@@ -19,6 +19,7 @@
#include <asm/sections.h>
#include <linux/io.h>
+#include <linux/sort.h>
#include "internal.h"
@@ -2025,6 +2026,66 @@ static int __init early_memblock(char *p)
}
early_param("memblock", early_memblock);
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+
+#define NAME_SIZE 100
+struct memsize_rgn_struct {
+	phys_addr_t base;	/* start physical address of the region */
+	long size;		/* region size in bytes */
+	bool nomap;		/* region has the "no-map" DT property */
+	bool reusable;		/* region has the "reusable" DT property (e.g. CMA) */
+	char name[NAME_SIZE];	/* region name, "@<address>" suffix stripped */
+};
+
+static struct memsize_rgn_struct memsize_rgn[CONFIG_MAX_MEMBLOCK_MEMSIZE] __initdata_memblock;
+static int memsize_rgn_count __initdata_memblock;
+
+static void __init_memblock memsize_get_valid_name(char *valid_name, const char *name)
+{
+	char *head, *tail, *found;
+	int val_size;
+
+	head = (char *)name;
+	tail = head + strlen(name);
+
+	/* drop a trailing "@<address>" suffix from the name, if any */
+	found = strchr(name, '@');
+	if (found)
+		tail = found;
+
+	val_size = tail - head;
+	if (val_size > NAME_SIZE - 1)
+		val_size = NAME_SIZE - 1;
+	strscpy(valid_name, head, val_size + 1);	/* strscpy() copies at most size - 1 chars */
+	valid_name[val_size] = '\0';
+}
+
+void __init_memblock memblock_memsize_record(const char *name, phys_addr_t base,
+				phys_addr_t size, bool nomap, bool reusable)
+{
+	struct memsize_rgn_struct *rgn;
+	phys_addr_t end;
+
+	if (memsize_rgn_count >= CONFIG_MAX_MEMBLOCK_MEMSIZE) {	/* >= defensively guards the bound */
+		pr_err("not enough space on memsize_rgn\n");
+		return;
+	}
+	rgn = &memsize_rgn[memsize_rgn_count++];
+	rgn->base = base;
+	rgn->size = size;
+	rgn->nomap = nomap;
+	rgn->reusable = reusable;
+
+	if (!name)
+		strscpy(rgn->name, "unknown", sizeof(rgn->name));
+	else
+		memsize_get_valid_name(rgn->name, name);
+	end = base + size - 1;
+	memblock_dbg("%s %pa..%pa nomap:%d reusable:%d\n",
+		     __func__, &base, &end, nomap, reusable);
+}
+#endif /* CONFIG_MEMBLOCK_MEMSIZE */
+
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
@@ -2289,6 +2350,61 @@ static int memblock_debug_show(struct seq_file *m, void *private)
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+
+static int memsize_rgn_cmp(const void *a, const void *b)
+{
+	const struct memsize_rgn_struct *ra = a, *rb = b;
+
+	if (ra->base > rb->base)
+		return -1;	/* descending order: higher base sorts first */
+
+	if (ra->base < rb->base)
+		return 1;
+
+	return 0;
+}
+
+static int memblock_memsize_show(struct seq_file *m, void *private)
+{
+	int i;
+	struct memsize_rgn_struct *rgn;
+	unsigned long reserved = 0, reusable = 0;
+
+	sort(memsize_rgn, memsize_rgn_count,	/* list regions in descending base order */
+	     sizeof(memsize_rgn[0]), memsize_rgn_cmp, NULL);
+	for (i = 0; i < memsize_rgn_count; i++) {
+		phys_addr_t base, end;
+		long size;
+
+		rgn = &memsize_rgn[i];
+		base = rgn->base;
+		size = rgn->size;
+		end = base + size;	/* exclusive end, matches the "base-end" output format */
+
+		seq_printf(m, "0x%pK-0x%pK 0x%08lx ( %7lu KB ) %s %s %s\n",
+			   (void *)base, (void *)end,
+			   size, DIV_ROUND_UP(size, SZ_1K),
+			   rgn->nomap ? "nomap" : " map",
+			   rgn->reusable ? "reusable" : "unusable",
+			   rgn->name);
+		if (rgn->reusable)	/* accumulate per-category totals for the summary */
+			reusable += (unsigned long)rgn->size;
+		else
+			reserved += (unsigned long)rgn->size;
+	}
+
+	seq_puts(m, "\n");
+	seq_printf(m, " .unusable : %7lu KB\n",
+		   DIV_ROUND_UP(reserved, SZ_1K));
+	seq_printf(m, " .reusable : %7lu KB\n",
+		   DIV_ROUND_UP(reusable, SZ_1K));
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(memblock_memsize);
+#endif
+
static int __init memblock_init_debugfs(void)
{
struct dentry *root = debugfs_create_dir("memblock", NULL);
@@ -2301,6 +2417,10 @@ static int __init memblock_init_debugfs(void)
debugfs_create_file("physmem", 0444, root, &physmem,
&memblock_debug_fops);
#endif
+#ifdef CONFIG_MEMBLOCK_MEMSIZE
+ debugfs_create_file("memsize", 0444, root,
+ NULL, &memblock_memsize_fops);
+#endif
return 0;
}
Some memory regions can be reserved for a specific purpose. They are usually defined through reserved-memory in the device tree. If only a size, without an address, is specified in the device tree, the address of the region is determined at boot time. We may find the addresses of such memory regions in the boot log, but it does not show all of them, and it can be hard to capture the very beginning of the log. memblock_dump_all shows the status of all memblocks, but it does not show region names, and its information is difficult to summarize. This patch introduces a debugfs node, memblock/memsize, to inspect reserved memory easily. The first patch here shows only the reserved-memory regions defined in the device tree, as in the following example; the next patches will show more information. There is a case in which the reserved memory region name has an "@<address>" string appended at the end. That information is not actually needed, so strip it. $ cat debugfs/memblock/memsize 0x0f9000000-0x0fb000000 0x02000000 ( 32768 KB ) map reusable linux,cma 0x0b1900000-0x0b1b00000 0x00200000 ( 2048 KB ) nomap unusable test1 0x0b0200000-0x0b0400000 0x00200000 ( 2048 KB ) map unusable test2 .unusable : 4096 KB .reusable : 32768 KB Signed-off-by: Jaewon Kim <jaewon31.kim@samsung.com> --- drivers/of/fdt.c | 3 + drivers/of/of_reserved_mem.c | 9 ++- include/linux/memblock.h | 9 +++ kernel/dma/contiguous.c | 2 + mm/Kconfig | 16 +++++ mm/memblock.c | 120 +++++++++++++++++++++++++++++++++++ 6 files changed, 156 insertions(+), 3 deletions(-)