From patchwork Mon Oct 19 23:38:53 2015 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Geoff Levand X-Patchwork-Id: 7440641 Return-Path: X-Original-To: patchwork-linux-arm@patchwork.kernel.org Delivered-To: patchwork-parsemail@patchwork1.web.kernel.org Received: from mail.kernel.org (mail.kernel.org [198.145.29.136]) by patchwork1.web.kernel.org (Postfix) with ESMTP id 180A89F443 for ; Mon, 19 Oct 2015 23:51:36 +0000 (UTC) Received: from mail.kernel.org (localhost [127.0.0.1]) by mail.kernel.org (Postfix) with ESMTP id D541D207D0 for ; Mon, 19 Oct 2015 23:51:30 +0000 (UTC) Received: from bombadil.infradead.org (bombadil.infradead.org [198.137.202.9]) (using TLSv1.2 with cipher AES128-GCM-SHA256 (128/128 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id DB7952053E for ; Mon, 19 Oct 2015 23:50:18 +0000 (UTC) Received: from localhost ([127.0.0.1] helo=bombadil.infradead.org) by bombadil.infradead.org with esmtp (Exim 4.80.1 #2 (Red Hat Linux)) id 1ZoK3P-0007fC-GI; Mon, 19 Oct 2015 23:41:23 +0000 Received: from merlin.infradead.org ([2001:4978:20e::2]) by bombadil.infradead.org with esmtps (Exim 4.80.1 #2 (Red Hat Linux)) id 1ZoK12-0002eO-CU; Mon, 19 Oct 2015 23:38:56 +0000 Received: from geoff by merlin.infradead.org with local (Exim 4.85 #2 (Red Hat Linux)) id 1ZoK0z-0000wa-VU; Mon, 19 Oct 2015 23:38:53 +0000 Message-Id: In-Reply-To: References: From: Geoff Levand Patch-Date: Tue, 22 Sep 2015 15:36:30 -0700 Subject: [PATCH 08/16] arm64/kexec: Add core kexec support To: Catalin Marinas , Will Deacon Date: Mon, 19 Oct 2015 23:38:53 +0000 X-BeenThere: linux-arm-kernel@lists.infradead.org X-Mailman-Version: 2.1.20 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Mark Rutland , AKASHI@infradead.org, marc.zyngier@arm.com, kexec@lists.infradead.org, Takahiro , linux-arm-kernel@lists.infradead.org, 
christoffer.dall@linaro.org MIME-Version: 1.0 Sender: "linux-arm-kernel" Errors-To: linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@lists.infradead.org X-Spam-Status: No, score=-4.2 required=5.0 tests=BAYES_00, RCVD_IN_DNSWL_MED, RP_MATCHES_RCVD, UNPARSEABLE_RELAY autolearn=unavailable version=3.3.1 X-Spam-Checker-Version: SpamAssassin 3.3.1 (2010-03-16) on mail.kernel.org X-Virus-Scanned: ClamAV using ClamSMTP Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S to the arm64 architecture that add support for the kexec re-boot mechanism (CONFIG_KEXEC) on arm64 platforms. Signed-off-by: Geoff Levand Tested-by: Pratyush Anand Tested-By: James Morse --- arch/arm64/Kconfig | 10 +++ arch/arm64/include/asm/kexec.h | 48 +++++++++++ arch/arm64/kernel/Makefile | 2 + arch/arm64/kernel/cpu-reset.S | 2 +- arch/arm64/kernel/machine_kexec.c | 141 +++++++++++++++++++++++++++++++ arch/arm64/kernel/relocate_kernel.S | 163 ++++++++++++++++++++++++++++++++++++ include/uapi/linux/kexec.h | 1 + 7 files changed, 366 insertions(+), 1 deletion(-) create mode 100644 arch/arm64/include/asm/kexec.h create mode 100644 arch/arm64/kernel/machine_kexec.c create mode 100644 arch/arm64/kernel/relocate_kernel.S diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 07d1811..73e8e31 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -491,6 +491,16 @@ config SECCOMP and the task is only allowed to execute a few safe syscalls defined by each seccomp mode. +config KEXEC + depends on (!SMP || PM_SLEEP_SMP) + select KEXEC_CORE + bool "kexec system call" + ---help--- + kexec is a system call that implements the ability to shutdown your + current kernel, and to start another kernel. It is like a reboot + but it is independent of the system firmware. And like a reboot + you can start any kernel with it, not just Linux. 
+ config XEN_DOM0 def_bool y depends on XEN diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h new file mode 100644 index 0000000..46d63cd --- /dev/null +++ b/arch/arm64/include/asm/kexec.h @@ -0,0 +1,48 @@ +/* + * kexec for arm64 + * + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#if !defined(_ARM64_KEXEC_H) +#define _ARM64_KEXEC_H + +/* Maximum physical address we can use pages from */ + +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ + +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ + +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 4096 + +#define KEXEC_ARCH KEXEC_ARCH_ARM64 + +#if !defined(__ASSEMBLY__) + +/** + * crash_setup_regs() - save registers for the panic kernel + * + * @newregs: registers are saved here + * @oldregs: registers to be saved (may be %NULL) + */ + +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + /* Empty routine needed to avoid build errors. 
*/ +} + +#endif /* !defined(__ASSEMBLY__) */ + +#endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index 22dc9bc..989ccd7 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -36,6 +36,8 @@ arm64-obj-$(CONFIG_EFI) += efi.o efi-stub.o efi-entry.o arm64-obj-$(CONFIG_PCI) += pci.o arm64-obj-$(CONFIG_ARMV8_DEPRECATED) += armv8_deprecated.o arm64-obj-$(CONFIG_ACPI) += acpi.o +arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ + cpu-reset.o obj-y += $(arm64-obj-y) vdso/ obj-m += $(arm64-obj-m) diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index ffc9e385e..7cc7f56 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S @@ -3,7 +3,7 @@ * * Copyright (C) 2001 Deep Blue Solutions Ltd. * Copyright (C) 2012 ARM Ltd. - * Copyright (C) 2015 Huawei Futurewei Technologies. + * Copyright (C) Huawei Futurewei Technologies. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c new file mode 100644 index 0000000..1fae6ae --- /dev/null +++ b/arch/arm64/kernel/machine_kexec.c @@ -0,0 +1,141 @@ +/* + * kexec for arm64 + * + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include +#include + +#include "cpu-reset.h" + +/* Global variables for the relocate_kernel routine. 
*/ +extern const unsigned char arm64_relocate_new_kernel[]; +extern const unsigned long arm64_relocate_new_kernel_size; +extern unsigned long arm64_kexec_kimage_head_offset; +extern unsigned long arm64_kexec_kimage_start_offset; + +static unsigned long kimage_head; +static unsigned long kimage_start; + +void machine_kexec_cleanup(struct kimage *image) +{ + /* Empty routine needed to avoid build errors. */ +} + +/** + * machine_kexec_prepare - Prepare for a kexec reboot. + * + * Called from the core kexec code when a kernel image is loaded. + */ +int machine_kexec_prepare(struct kimage *image) +{ + kimage_start = image->start; + return 0; +} + +/** + * kexec_list_flush - Helper to flush the kimage list to PoC. + */ +static void kexec_list_flush(unsigned long kimage_head) +{ + unsigned long *entry; + + for (entry = &kimage_head; ; entry++) { + unsigned int flag = *entry & IND_FLAGS; + void *addr = phys_to_virt(*entry & PAGE_MASK); + + switch (flag) { + case IND_INDIRECTION: + entry = (unsigned long *)addr - 1; + __flush_dcache_area(addr, PAGE_SIZE); + break; + case IND_DESTINATION: + break; + case IND_SOURCE: + __flush_dcache_area(addr, PAGE_SIZE); + break; + case IND_DONE: + return; + default: + BUG(); + } + } +} + +static void soft_restart(unsigned long addr) +{ + setup_mm_for_reboot(); + cpu_soft_restart(virt_to_phys(cpu_reset), addr, + is_hyp_mode_available()); + + BUG(); /* Should never get here. */ +} + +/** + * machine_kexec - Do the kexec reboot. + * + * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC. + */ +void machine_kexec(struct kimage *image) +{ + phys_addr_t reboot_code_buffer_phys; + void *reboot_code_buffer; + + BUG_ON(num_online_cpus() > 1); + + kimage_head = image->head; + + reboot_code_buffer_phys = page_to_phys(image->control_code_page); + reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys); + + /* + * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use + * after the kernel is shut down. 
 + */ + memcpy(reboot_code_buffer, arm64_relocate_new_kernel, + arm64_relocate_new_kernel_size); + + /* Set the variables in reboot_code_buffer. */ + + memcpy(reboot_code_buffer + arm64_kexec_kimage_start_offset, + &kimage_start, sizeof(kimage_start)); + memcpy(reboot_code_buffer + arm64_kexec_kimage_head_offset, + &kimage_head, sizeof(kimage_head)); + + /* Flush the reboot_code_buffer in preparation for its execution. */ + __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size); + + /* Flush the kimage list. */ + kexec_list_flush(image->head); + + pr_info("Bye!\n"); + + /* Disable all DAIF exceptions. */ + asm volatile ("msr daifset, #0xf" : : : "memory"); + + /* + * soft_restart() will shutdown the MMU, disable data caches, then + * transfer control to the reboot_code_buffer which contains a copy of + * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel + * will use physical addressing to relocate the new kernel to its final + * position and then will transfer control to the entry point of the new + * kernel. + */ + soft_restart(reboot_code_buffer_phys); +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + /* Empty routine needed to avoid build errors. */ +} diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S new file mode 100644 index 0000000..7b07a16 --- /dev/null +++ b/arch/arm64/kernel/relocate_kernel.S @@ -0,0 +1,163 @@ +/* + * kexec for arm64 + * + * Copyright (C) Linaro. + * Copyright (C) Huawei Futurewei Technologies. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include +#include +#include +#include + + +/* + * arm64_relocate_new_kernel - Put a 2nd stage kernel image in place and boot it. + * + * The memory that the old kernel occupies may be overwritten when copying the + * new image to its final location. 
To assure that the + * arm64_relocate_new_kernel routine which does that copy is not overwritten, + * all code and data needed by arm64_relocate_new_kernel must be between the + * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The + * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec + * control_code_page, a special page which has been set up to be preserved + * during the copy operation. + */ +.globl arm64_relocate_new_kernel +arm64_relocate_new_kernel: + + /* Setup the list loop variables. */ + ldr x18, .Lkimage_head /* x18 = list entry */ + dcache_line_size x17, x0 /* x17 = dcache line size */ + mov x16, xzr /* x16 = segment start */ + mov x15, xzr /* x15 = entry ptr */ + mov x14, xzr /* x14 = copy dest */ + + /* Check if the new image needs relocation. */ + cbz x18, .Ldone + tbnz x18, IND_DONE_BIT, .Ldone + +.Lloop: + and x13, x18, PAGE_MASK /* x13 = addr */ + + /* Test the entry flags. */ +.Ltest_source: + tbz x18, IND_SOURCE_BIT, .Ltest_indirection + + mov x20, x14 /* x20 = copy dest */ + mov x21, x13 /* x21 = copy src */ + + /* Invalidate dest page to PoC. */ + mov x0, x20 + add x19, x0, #PAGE_SIZE + sub x1, x17, #1 + bic x0, x0, x1 +1: dc ivac, x0 + add x0, x0, x17 + cmp x0, x19 + b.lo 1b + dsb sy + + /* Copy page. 
*/ +1: ldp x22, x23, [x21] + ldp x24, x25, [x21, #16] + ldp x26, x27, [x21, #32] + ldp x28, x29, [x21, #48] + add x21, x21, #64 + stnp x22, x23, [x20] + stnp x24, x25, [x20, #16] + stnp x26, x27, [x20, #32] + stnp x28, x29, [x20, #48] + add x20, x20, #64 + tst x21, #(PAGE_SIZE - 1) + b.ne 1b + + /* dest += PAGE_SIZE */ + add x14, x14, PAGE_SIZE + b .Lnext + +.Ltest_indirection: + tbz x18, IND_INDIRECTION_BIT, .Ltest_destination + + /* ptr = addr */ + mov x15, x13 + b .Lnext + +.Ltest_destination: + tbz x18, IND_DESTINATION_BIT, .Lnext + + mov x16, x13 + + /* dest = addr */ + mov x14, x13 + +.Lnext: + /* entry = *ptr++ */ + ldr x18, [x15], #8 + + /* while (!(entry & DONE)) */ + tbz x18, IND_DONE_BIT, .Lloop + +.Ldone: + dsb sy + isb + ic ialluis + dsb sy + isb + + /* Start new image. */ + ldr x4, .Lkimage_start + mov x0, xzr + mov x1, xzr + mov x2, xzr + mov x3, xzr + br x4 + +.align 3 /* To keep the 64-bit values below naturally aligned. */ + +/* The machine_kexec routine sets these variables via offsets from + * arm64_relocate_new_kernel. + */ + +/* + * .Lkimage_start - Copy of image->start, the entry point of the new + * image. + */ +.Lkimage_start: + .quad 0x0 + +/* + * .Lkimage_head - Copy of image->head, the list of kimage entries. + */ +.Lkimage_head: + .quad 0x0 + +.Lcopy_end: +.org KEXEC_CONTROL_PAGE_SIZE + +/* + * arm64_relocate_new_kernel_size - Number of bytes to copy to the control_code_page. + */ +.globl arm64_relocate_new_kernel_size +arm64_relocate_new_kernel_size: + .quad .Lcopy_end - arm64_relocate_new_kernel + +/* + * arm64_kexec_kimage_start_offset - Offset for writing .Lkimage_start. + */ +.globl arm64_kexec_kimage_start_offset +arm64_kexec_kimage_start_offset: + .quad .Lkimage_start - arm64_relocate_new_kernel + +/* + * arm64_kexec_kimage_head_offset - Offset for writing .Lkimage_head. 
+ */ +.globl arm64_kexec_kimage_head_offset +arm64_kexec_kimage_head_offset: + .quad .Lkimage_head - arm64_relocate_new_kernel diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 99048e5..ccec467 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -39,6 +39,7 @@ #define KEXEC_ARCH_SH (42 << 16) #define KEXEC_ARCH_MIPS_LE (10 << 16) #define KEXEC_ARCH_MIPS ( 8 << 16) +#define KEXEC_ARCH_ARM64 (183 << 16) /* The artificial cap on the number of segments passed to kexec_load. */ #define KEXEC_SEGMENT_MAX 16