@@ -829,17 +829,42 @@ void __init paging_init(void)
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define PAGE_UNUSED 0xFD

+/*
+ * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
+ * from unused_pmd_start to the next PMD_SIZE boundary.
+ */
+static unsigned long unused_pmd_start __meminitdata;
+
+static void __meminit vmemmap_flush_unused_pmd(void)
+{
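+ /* No unused range is being tracked, so there is nothing to mark. */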
+ if (!unused_pmd_start)
+ return;
+ /*
+ * Marks [unused_pmd_start, next PMD_SIZE boundary) as PAGE_UNUSED.
+ */
+ memset((void *)unused_pmd_start, PAGE_UNUSED,
+ ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);
+ unused_pmd_start = 0;
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
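+/* Freeing a vmemmap PMD can only happen on the memory hot-remove path. */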
/* Returns true if the PMD is completely unused and thus it can be freed */
static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
{
unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);

+ /*
+ * Flush the unused range cache to ensure that memchr_inv() will work
+ * for the whole range.
+ */
+ vmemmap_flush_unused_pmd();
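+ /* Mark the range being removed as unused before scanning the whole PMD. */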
memset((void *)addr, PAGE_UNUSED, end - addr);
+
return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);
}
+#endif

-static void __meminit vmemmap_use_sub_pmd(unsigned long start)
+static void __meminit __vmemmap_use_sub_pmd(unsigned long start)
{
/*
* As we expect to add in the same granularity as we remove, it's
@@ -851,13 +876,38 @@ static void __meminit vmemmap_use_sub_pmd(unsigned long start)
memset((void *)start, 0, sizeof(struct page));
}

+static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
+{
+ /*
+ * We only optimize if the new used range directly follows the
+ * previously unused range (especially when populating consecutive sections).
+ */
+ if (unused_pmd_start == start) {
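+ /*
+ * If end hits the next PMD boundary, the PMD is fully used;
+ * otherwise the unused range now begins at end.
+ */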
+ if (likely(IS_ALIGNED(end, PMD_SIZE)))
+ unused_pmd_start = 0;
+ else
+ unused_pmd_start = end;
+ return;
+ }
+
+ /*
+ * If the range does not contiguously follow the previous one, make sure
+ * to mark the unused range of the previous one so it can be removed.
+ */
+ vmemmap_flush_unused_pmd();
+ __vmemmap_use_sub_pmd(start);
+}
+
static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
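+ /*
+ * A cached unused range can only belong to a previously populated
+ * PMD; mark it before starting to use this new one.
+ */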
+ vmemmap_flush_unused_pmd();
+
/*
* Could be our memmap page is filled with PAGE_UNUSED already from a
* previous remove. Make sure to reset it.
*/
- vmemmap_use_sub_pmd(start);
+ __vmemmap_use_sub_pmd(start);

/*
* Mark with PAGE_UNUSED the unused parts of the new memmap range
@@ -865,9 +915,14 @@ static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long
if (!IS_ALIGNED(start, PMD_SIZE))
memset((void *)ALIGN_DOWN(start, PMD_SIZE), PAGE_UNUSED,
start - ALIGN_DOWN(start, PMD_SIZE));
+
+ /*
+ * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
+ * consecutive sections. Remember where the unused range of the last
+ * added PMD begins.
+ */
if (!IS_ALIGNED(end, PMD_SIZE))
- memset((void *)end, PAGE_UNUSED,
- ALIGN(end, PMD_SIZE) - end);
+ unused_pmd_start = end;
}
#endif
@@ -1537,7 +1592,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
return -ENOMEM; /* no fallback */
} else if (pmd_large(*pmd)) {
vmemmap_verify((pte_t *)pmd, node, addr, next);
- vmemmap_use_sub_pmd(addr);
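+ /* This PMD was already populated as a huge page; reuse it. */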
+ vmemmap_use_sub_pmd(addr, next);
continue;
}
if (vmemmap_populate_basepages(addr, next, node, NULL))