@@ -85,7 +85,8 @@ static void __init reserve_crashkernel(void)
 
 	if (crash_base == 0) {
 		/* Current arm64 boot protocol requires 2MB alignment */
-		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
+		crash_base = memblock_find_in_range(0,
+				arm64_dma_phys_limit ? : arm64_dma32_phys_limit,
 				crash_size, SZ_2M);
 		if (crash_base == 0) {
 			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
@@ -443,7 +444,7 @@ void __init bootmem_init(void)
 	 */
 	reserve_crashkernel();
 
-	dma_contiguous_reserve(arm64_dma32_phys_limit);
+	dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
 
 	memblock_dump_all();
 }
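Both hunks rely on the GNU C conditional-expression extension `x ? : y` (supported by GCC and Clang, and used throughout the kernel), which evaluates `x` once and yields it if non-zero, otherwise `y`. A minimal standalone sketch of its behavior, outside the kernel, with illustrative limit values chosen here for the example:

```c
#include <stdio.h>

int main(void)
{
	unsigned long long dma_limit = 0;		/* e.g. CONFIG_ZONE_DMA=n */
	unsigned long long dma32_limit = 0x100000000ULL;	/* 4 GiB */

	/* GNU extension: "a ? : b" is "a ? a : b" with a evaluated once. */
	printf("%#llx\n", dma_limit ? : dma32_limit);	/* prints 0x100000000 */

	dma_limit = 0x40000000ULL;		/* e.g. a 30-bit ZONE_DMA */
	printf("%#llx\n", dma_limit ? : dma32_limit);	/* prints 0x40000000 */
	return 0;
}
```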
Currently, CMA and crashkernel are reserved in ZONE_DMA32, which is fine
for the majority of devices, but platforms that need them in ZONE_DMA
have to configure that explicitly. Since the patchset "arm64: Default to
32-bit wide ZONE_DMA", ZONE_DMA's size is fine-tuned at boot, so we can
reserve CMA and crashkernel directly in ZONE_DMA when CONFIG_ZONE_DMA is
enabled, and fall back to ZONE_DMA32 otherwise.

Signed-off-by: Chen Zhou <chenzhou10@huawei.com>
Suggested-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
---
 arch/arm64/mm/init.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
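For reviewers, here is a small userspace model of the selection policy the patch implements (not kernel code; `pick_reserve_limit` and the limit values are hypothetical names and numbers for illustration). The point it captures: `arm64_dma_phys_limit` is only set up when CONFIG_ZONE_DMA is enabled and stays zero otherwise, so the fallback picks the ZONE_DMA32 limit in that case.

```c
#include <assert.h>

typedef unsigned long long phys_addr_t;

/* Hypothetical model of "arm64_dma_phys_limit ? : arm64_dma32_phys_limit". */
static phys_addr_t pick_reserve_limit(phys_addr_t dma_limit,
				      phys_addr_t dma32_limit)
{
	return dma_limit ? dma_limit : dma32_limit;
}

int main(void)
{
	/* CONFIG_ZONE_DMA=y: the fine-tuned ZONE_DMA limit wins. */
	assert(pick_reserve_limit(0x40000000ULL, 0x100000000ULL) == 0x40000000ULL);
	/* CONFIG_ZONE_DMA=n: dma_limit stays 0, so fall back to ZONE_DMA32. */
	assert(pick_reserve_limit(0, 0x100000000ULL) == 0x100000000ULL);
	return 0;
}
```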