@@ -1194,6 +1194,8 @@ void __init early_init_dt_scan_nodes(void)
/* Handle linux,usable-memory-range property */
early_init_dt_check_for_usable_mem_range();
+
+ memblock_memsize_detect_hole();
}
bool __init early_init_dt_scan(void *params)
@@ -617,10 +617,12 @@ static inline void memtest_report_meminfo(struct seq_file *m) { }
extern void memblock_memsize_record(const char *name, phys_addr_t base,
phys_addr_t size, bool nomap,
bool reusable);
+extern void memblock_memsize_detect_hole(void);
#else
static inline void memblock_memsize_record(const char *name, phys_addr_t base,
phys_addr_t size, bool nomap,
bool reusable) { }
+static inline void memblock_memsize_detect_hole(void) { }
#endif
#endif /* _LINUX_MEMBLOCK_H */
@@ -2084,6 +2084,51 @@ void __init_memblock memblock_memsize_record(const char *name, phys_addr_t base,
memblock_dbg("%s %pa..%pa nomap:%d reusable:%d\n",
__func__, &base, &end, nomap, reusable);
}
+
+/* This function is called by early_init_dt_scan_nodes() */
+void __init memblock_memsize_detect_hole(void)
+{
+ phys_addr_t base, end;
+ phys_addr_t prev_end, hole_sz;
+ int idx;
+ struct memblock_region *rgn;
+ int memblock_cnt = (int)memblock.memory.cnt;
+
+ /* assume that the hole size is less than 1 GB */
+ for_each_memblock_type(idx, (&memblock.memory), rgn) {
+ prev_end = (idx == 0) ? round_down(rgn->base, SZ_1G) : end;
+ base = rgn->base;
+ end = rgn->base + rgn->size;
+
+ /* only for the last region, check a hole after the region */
+ if (idx + 1 == memblock_cnt) {
+ hole_sz = round_up(end, SZ_1G) - end;
+ if (hole_sz)
+ memblock_memsize_record(NULL, end, hole_sz,
+ true, false);
+ }
+
+ /* for each region, check a hole prior to the region */
+ hole_sz = base - prev_end;
+ if (!hole_sz)
+ continue;
+ if (hole_sz < SZ_1G) {
+ memblock_memsize_record(NULL, prev_end, hole_sz, true,
+ false);
+ } else {
+ phys_addr_t hole_sz1, hole_sz2;
+
+ hole_sz1 = round_up(prev_end, SZ_1G) - prev_end;
+ if (hole_sz1)
+ memblock_memsize_record(NULL, prev_end,
+ hole_sz1, true, false);
+ hole_sz2 = base % SZ_1G;
+ if (hole_sz2)
+ memblock_memsize_record(NULL, base - hole_sz2,
+ hole_sz2, true, false);
+ }
+ }
+}
#endif /* MEMBLOCK_MEMSIZE */
static void __init free_memmap(unsigned long start_pfn, unsigned long end_pfn)
The bootloader knows the actual memory size, but it may reserve some memory for a specific purpose and pass only the remaining memory regions to the kernel. Even though the kernel does not know what that reserved memory is used for, we need to detect those regions so that all reserved memory can be summed up. Let us call such a region a memory hole. To estimate the hole size, this patch makes two assumptions. One is that each physical memory region has a 1 GB-aligned base address and size. The other is that each hole is smaller than 1 GB. A hole is shown as unknown in the memsize logic. This is an example. 0x0bf000000-0x0c0000000 0x01000000 ( 16384 KB ) nomap unusable unknown Signed-off-by: Jaewon Kim <jaewon31.kim@samsung.com> --- drivers/of/fdt.c | 2 ++ include/linux/memblock.h | 2 ++ mm/memblock.c | 45 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 49 insertions(+)