@@ -392,9 +392,9 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 
 #ifdef CONFIG_KEXEC_CORE
 
+#ifdef CONFIG_X86_64
 static int __init reserve_crashkernel_low(void)
 {
-#ifdef CONFIG_X86_64
 	unsigned long long base, low_base = 0, low_size = 0;
 	unsigned long low_mem_limit;
 	int ret;
@@ -434,9 +434,10 @@ static int __init reserve_crashkernel_low(void)
 
 	crashk_low_res.start = low_base;
 	crashk_low_res.end = low_base + low_size - 1;
-#endif
+
 	return 0;
 }
+#endif
 
 static void __init reserve_crashkernel(void)
 {
@@ -490,10 +491,12 @@ static void __init reserve_crashkernel(void)
 		}
 	}
 
+#ifdef CONFIG_X86_64
 	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
 		memblock_phys_free(crash_base, crash_size);
 		return;
 	}
+#endif
 
 	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
 		(unsigned long)(crash_size >> 20),

Currently, only X86_64 requires that at least 256M of low memory be
reserved; X86_32 does not have this requirement. So move all the code
related to reserve_crashkernel_low() under the CONFIG_X86_64 macro.

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
---
 arch/x86/kernel/setup.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
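
For readers who want the net effect at a glance, the following is a minimal,
condensed sketch of the resulting #ifdef layout in arch/x86/kernel/setup.c
after the patch is applied. Only the #ifdef structure and the call-site check
are taken from the diff above; the function bodies are reduced to comments and
the local variable declarations shown here are illustrative placeholders, not
the real code.

```c
#ifdef CONFIG_KEXEC_CORE

#ifdef CONFIG_X86_64
/* Entire helper is now compiled only on X86_64. */
static int __init reserve_crashkernel_low(void)
{
	/* ... reserve low memory below 4G for the crash kernel ... */
	return 0;
}
#endif

static void __init reserve_crashkernel(void)
{
	/* Illustrative locals; in the real code these come from parsing crashkernel=. */
	unsigned long long crash_base = 0, crash_size = 0;

	/* ... locate and reserve the main crash kernel region ... */

#ifdef CONFIG_X86_64
	/* Low-memory fallback is needed only when the region sits above 4G. */
	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}
#endif

	/* ... report the reservation ... */
}

#endif /* CONFIG_KEXEC_CORE */
```

On X86_32 builds the helper and its caller's check disappear entirely, so no
low-memory reservation code is compiled in, matching the commit message above.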