@@ -502,21 +502,6 @@ void __init mark_linear_text_alias_ro(void)
 			    PAGE_KERNEL_RO);
 }
 
-static bool crash_mem_map __initdata;
-
-static int __init enable_crash_mem_map(char *arg)
-{
-	/*
-	 * Proper parameter parsing is done by reserve_crashkernel(). We only
-	 * need to know if the linear map has to avoid block mappings so that
-	 * the crashkernel reservations can be unmapped later.
-	 */
-	crash_mem_map = true;
-
-	return 0;
-}
-early_param("crashkernel", enable_crash_mem_map);
-
 static void __init map_mem(pgd_t *pgdp)
 {
 	static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
@@ -547,11 +532,9 @@ static void __init map_mem(pgd_t *pgdp)
 	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
 
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map && !have_zone_dma()) {
-		if (crashk_res.end)
-			memblock_mark_nomap(crashk_res.start,
-					    resource_size(&crashk_res));
-	}
+	if (crashk_res.end)
+		memblock_mark_nomap(crashk_res.start,
+				    resource_size(&crashk_res));
 #endif
 
 	/* map all the memory banks */
@@ -582,20 +565,17 @@ static void __init map_mem(pgd_t *pgdp)
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 
 	/*
-	 * Use page-level mappings here so that we can shrink the region
-	 * in page granularity and put back unused memory to buddy system
-	 * through /sys/kernel/kexec_crash_size interface.
+	 * Use page-level mappings here so that we can protect crash kernel
+	 * memory to allow post-mortem analysis when things go awry.
 	 */
 #ifdef CONFIG_KEXEC_CORE
-	if (crash_mem_map && !have_zone_dma()) {
-		if (crashk_res.end) {
-			__map_memblock(pgdp, crashk_res.start,
-				       crashk_res.end + 1,
-				       PAGE_KERNEL,
-				       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
-			memblock_clear_nomap(crashk_res.start,
-					     resource_size(&crashk_res));
-		}
+	if (crashk_res.end) {
+		__map_memblock(pgdp, crashk_res.start,
+			       crashk_res.end + 1,
+			       PAGE_KERNEL,
+			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
+		memblock_clear_nomap(crashk_res.start,
+				     resource_size(&crashk_res));
 	}
 #endif
 }
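
A note on the NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS flags used above: they are what makes the crash kernel region protectable after boot. On arm64, set_memory_valid() is built on apply_to_page_range(), which walks PTEs only and cannot split a block (PMD/PUD) or contiguous mapping, so crashk_res has to be mapped with base pages from the start. The consumers are the kexec protection hooks; a sketch, abridged from arch/arm64/kernel/machine_kexec.c (exact bodies vary across kernel versions):

void arch_kexec_protect_crashkres(void)
{
	int i;

	/* Clear the valid bit on every page backing a loaded segment. */
	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 0);
}

void arch_kexec_unprotect_crashkres(void)
{
	int i;

	/* Restore the valid bit so the image can be accessed again. */
	for (i = 0; i < kexec_crash_image->nr_segments; i++)
		set_memory_valid(
			__phys_to_virt(kexec_crash_image->segment[i].mem),
			kexec_crash_image->segment[i].memsz >> PAGE_SHIFT, 1);
}

The core kexec code unprotects the region while a crash image is being (re)loaded and protects it again afterwards, so a stray write from the running kernel faults instead of silently corrupting the loaded image.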