From patchwork Tue Sep 30 23:59:25 2014
X-Patchwork-Submitter: Geoff Levand
X-Patchwork-Id: 5008951
Message-ID: <1412121565.6630.64.camel@smoke>
In-Reply-To: <471af24b7de659a30a5941e793634bffec380cb9.1411604443.git.geoff@infradead.org>
References: <471af24b7de659a30a5941e793634bffec380cb9.1411604443.git.geoff@infradead.org>
Subject: [PATCH V2 6/7] arm64/kexec: Add core kexec support
From: Geoff Levand
To: Catalin Marinas
Cc: marc.zyngier@arm.com, Will Deacon, kexec@lists.infradead.org,
 Vivek Goyal, christoffer.dall@linaro.org,
 linux-arm-kernel@lists.infradead.org
Date: Tue, 30 Sep 2014 16:59:25 -0700

Add three new files, kexec.h, machine_kexec.c and relocate_kernel.S to the
arm64 architecture that add support for the kexec re-boot mechanism
(CONFIG_KEXEC) on arm64 platforms.

Signed-off-by: Geoff Levand
---
V2:
  o Add comment about soft_restart().
  o Fold kexec_list_walk() and flush callback into kexec_list_flush().
  o Add arm64_ prefix to global variables.
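
For context only (not part of the patch): a rough user-space sketch of how the
segment list that machine_kexec_prepare() scans might be handed to the kernel
through the kexec_load syscall. The file names, load addresses, page rounding,
and the choice of the kernel segment's load address as the entry point are
placeholders invented for illustration; a real loader (kexec-tools) derives
them from the Image header and the platform memory map. KEXEC_ARCH_ARM64 here
assumes the uapi addition at the end of this patch.

	/* Illustrative sketch -- paths and addresses are placeholders. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/stat.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/kexec.h>

	static void *read_file(const char *path, size_t *size)
	{
		struct stat st;
		void *buf;
		int fd = open(path, O_RDONLY);

		if (fd < 0)
			return NULL;
		if (fstat(fd, &st) < 0) {
			close(fd);
			return NULL;
		}
		buf = malloc(st.st_size);
		if (buf && read(fd, buf, st.st_size) != st.st_size) {
			free(buf);
			buf = NULL;
		}
		close(fd);
		*size = st.st_size;
		return buf;
	}

	int main(void)
	{
		size_t image_size, dtb_size;
		void *image = read_file("/boot/Image", &image_size);   /* placeholder path */
		void *dtb = read_file("/boot/target.dtb", &dtb_size);  /* placeholder path */
		unsigned long image_mem = 0x80080000;  /* hypothetical load addresses */
		unsigned long dtb_mem = 0x8f000000;
		struct kexec_segment seg[2];
		long ret;

		if (!image || !dtb)
			return 1;

		/* Segment 0: the new kernel image; the entry point lands inside it. */
		seg[0].buf = image;
		seg[0].bufsz = image_size;
		seg[0].mem = (void *)image_mem;
		seg[0].memsz = (image_size + 4095) & ~4095UL;

		/* Segment 1: the device tree that kexec_find_dtb_seg() locates. */
		seg[1].buf = dtb;
		seg[1].bufsz = dtb_size;
		seg[1].mem = (void *)dtb_mem;
		seg[1].memsz = (dtb_size + 4095) & ~4095UL;

		ret = syscall(SYS_kexec_load, image_mem, 2UL, seg,
			      (unsigned long)KEXEC_ARCH_ARM64);
		if (ret)
			perror("kexec_load");

		return ret ? 1 : 0;
	}

The actual reboot into the loaded image is then triggered by a
reboot(LINUX_REBOOT_CMD_KEXEC) call, which ends up in machine_kexec() below.
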
 arch/arm64/Kconfig                  |   9 ++
 arch/arm64/include/asm/kexec.h      |  47 +++++++++
 arch/arm64/kernel/Makefile          |   1 +
 arch/arm64/kernel/machine_kexec.c   | 171 +++++++++++++++++++++++++++++++++
 arch/arm64/kernel/relocate_kernel.S | 184 ++++++++++++++++++++++++++++++++++++
 include/uapi/linux/kexec.h          |   1 +
 6 files changed, 413 insertions(+)
 create mode 100644 arch/arm64/include/asm/kexec.h
 create mode 100644 arch/arm64/kernel/machine_kexec.c
 create mode 100644 arch/arm64/kernel/relocate_kernel.S

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f0d3a2d..af03449 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -313,6 +313,15 @@ config ARCH_HAS_CACHE_LINE_SIZE
 
 source "mm/Kconfig"
 
+config KEXEC
+	depends on (!SMP || PM_SLEEP_SMP)
+	bool "kexec system call"
+	---help---
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel.  It is like a reboot
+	  but it is independent of the system firmware.  And like a reboot
+	  you can start any kernel with it, not just Linux.
+
 config XEN_DOM0
 	def_bool y
 	depends on XEN
diff --git a/arch/arm64/include/asm/kexec.h b/arch/arm64/include/asm/kexec.h
new file mode 100644
index 0000000..e7bd7ab
--- /dev/null
+++ b/arch/arm64/include/asm/kexec.h
@@ -0,0 +1,47 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(_ARM64_KEXEC_H)
+#define _ARM64_KEXEC_H
+
+/* Maximum physical address we can use pages from */
+
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+#define KEXEC_CONTROL_PAGE_SIZE	4096
+
+#define KEXEC_ARCH KEXEC_ARCH_ARM64
+
+#if !defined(__ASSEMBLY__)
+
+/**
+ * crash_setup_regs() - save registers for the panic kernel
+ *
+ * @newregs: registers are saved here
+ * @oldregs: registers to be saved (may be %NULL)
+ */
+
+static inline void crash_setup_regs(struct pt_regs *newregs,
+				    struct pt_regs *oldregs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+#endif /* !defined(__ASSEMBLY__) */
+
+#endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 6e9538c..77a7351 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -30,6 +30,7 @@ arm64-obj-$(CONFIG_CPU_IDLE)	+= cpuidle.o
 arm64-obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 arm64-obj-$(CONFIG_KGDB)	+= kgdb.o
 arm64-obj-$(CONFIG_EFI)		+= efi.o efi-stub.o efi-entry.o
+arm64-obj-$(CONFIG_KEXEC)	+= machine_kexec.o relocate_kernel.o
 
 obj-y					+= $(arm64-obj-y) vdso/
 obj-m					+= $(arm64-obj-m)
diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
new file mode 100644
index 0000000..8924bb0
--- /dev/null
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -0,0 +1,171 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/of_fdt.h>
+#include <linux/smp.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+
+/* Global variables for the relocate_kernel routine. */
+extern const unsigned char relocate_new_kernel[];
+extern const unsigned long relocate_new_kernel_size;
+extern unsigned long arm64_kexec_dtb_addr;
+extern unsigned long arm64_kexec_kimage_head;
+extern unsigned long arm64_kexec_kimage_start;
+
+/**
+ * kexec_is_dtb - Helper routine to check the device tree header signature.
+ */
+
+static bool kexec_is_dtb(const void *dtb)
+{
+	__be32 magic;
+
+	return get_user(magic, (__be32 *)dtb) ? false :
+		(be32_to_cpu(magic) == OF_DT_HEADER);
+}
+
+/**
+ * kexec_find_dtb_seg - Helper routine to find the dtb segment.
+ */
+
+static const struct kexec_segment *kexec_find_dtb_seg(
+	const struct kimage *image)
+{
+	int i;
+
+	for (i = 0; i < image->nr_segments; i++) {
+		if (kexec_is_dtb(image->segment[i].buf))
+			return &image->segment[i];
+	}
+
+	return NULL;
+}
+
+void machine_kexec_cleanup(struct kimage *image)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ */
+
+int machine_kexec_prepare(struct kimage *image)
+{
+	const struct kexec_segment *dtb_seg;
+
+	dtb_seg = kexec_find_dtb_seg(image);
+
+	if (!dtb_seg)
+		return -EINVAL;
+
+	arm64_kexec_dtb_addr = dtb_seg->mem;
+	arm64_kexec_kimage_start = image->start;
+
+	return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list to PoC.
+ */
+
+static void kexec_list_flush(unsigned long kimage_head)
+{
+	void *dest;
+	unsigned long *entry;
+
+	for (entry = &kimage_head, dest = NULL; ; entry++) {
+		unsigned int flag = *entry &
+			(IND_DESTINATION | IND_INDIRECTION | IND_DONE |
+			IND_SOURCE);
+		void *addr = phys_to_virt(*entry & PAGE_MASK);
+
+		switch (flag) {
+		case IND_INDIRECTION:
+			entry = (unsigned long *)addr - 1;
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DESTINATION:
+			dest = addr;
+			break;
+		case IND_SOURCE:
+			__flush_dcache_area(addr, PAGE_SIZE);
+			dest += PAGE_SIZE;
+			break;
+		case IND_DONE:
+			return;
+		default:
+			break;
+		}
+	}
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+
+void machine_kexec(struct kimage *image)
+{
+	phys_addr_t reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+	BUG_ON(num_online_cpus() > 1);
+
+	arm64_kexec_kimage_head = image->head;
+
+	reboot_code_buffer_phys = page_to_phys(image->control_code_page);
+	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+	/*
+	 * Copy relocate_new_kernel to the reboot_code_buffer for use
+	 * after the kernel is shut down.
+	 */
+
+	memcpy(reboot_code_buffer, relocate_new_kernel,
+		relocate_new_kernel_size);
+
+	/* Flush the reboot_code_buffer in preparation for its execution. */
+
+	__flush_dcache_area(reboot_code_buffer, relocate_new_kernel_size);
+
+	/* Flush the kimage list. */
+
+	kexec_list_flush(image->head);
+
+	pr_info("Bye!\n");
+
+	/* Disable all DAIF exceptions. */
+
+	asm volatile ("msr daifset, #0xf" : : : "memory");
+
+	/*
+	 * soft_restart() will shutdown the MMU, disable data caches, then
+	 * transfer control to the reboot_code_buffer which contains a copy of
+	 * the relocate_new_kernel routine.  relocate_new_kernel will use
+	 * physical addressing to relocate the new kernel to its final position
+	 * and then will transfer control to the entry point of the new kernel.
+	 */
+
+	soft_restart(reboot_code_buffer_phys);
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S
new file mode 100644
index 0000000..ac68322
--- /dev/null
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -0,0 +1,184 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/kexec.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+/* The list entry flags. */
+
+#define IND_DESTINATION_BIT 0
+#define IND_INDIRECTION_BIT 1
+#define IND_DONE_BIT 2
+#define IND_SOURCE_BIT 3
+
+/*
+ * relocate_new_kernel - Put the 2nd stage kernel image in place and boot it.
+ *
+ * The memory that the old kernel occupies may be overwritten when copying the
+ * new kernel to its final location.  To ensure that the relocate_new_kernel
+ * routine which does that copy is not overwritten, all code and data needed
+ * by relocate_new_kernel must be between the symbols relocate_new_kernel and
+ * relocate_new_kernel_end.  The machine_kexec() routine will copy
+ * relocate_new_kernel to the kexec control_code_page, a special page which
+ * has been set up to be preserved during the kernel copy operation.
+ */
+
+.globl relocate_new_kernel
+relocate_new_kernel:
+
+	/* Setup the list loop variables. */
+
+	ldr	x18, arm64_kexec_kimage_head	/* x18 = list entry */
+	dcache_line_size x17, x0		/* x17 = dcache line size */
+	mov	x16, xzr			/* x16 = segment start */
+	mov	x15, xzr			/* x15 = entry ptr */
+	mov	x14, xzr			/* x14 = copy dest */
+
+	/* Check if the new kernel needs relocation. */
+
+	cbz	x18, .Ldone
+	tbnz	x18, IND_DONE_BIT, .Ldone
+
+.Lloop:
+	and	x13, x18, PAGE_MASK		/* x13 = addr */
+
+	/* Test the entry flags. */
+
+.Ltest_source:
+	tbz	x18, IND_SOURCE_BIT, .Ltest_indirection
+
+	/* copy_page(x20 = dest, x21 = src) */
+
+	mov	x20, x14
+	mov	x21, x13
+
+1:	ldp	x22, x23, [x21]
+	ldp	x24, x25, [x21, #16]
+	ldp	x26, x27, [x21, #32]
+	ldp	x28, x29, [x21, #48]
+	add	x21, x21, #64
+	stnp	x22, x23, [x20]
+	stnp	x24, x25, [x20, #16]
+	stnp	x26, x27, [x20, #32]
+	stnp	x28, x29, [x20, #48]
+	add	x20, x20, #64
+	tst	x21, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	/* dest += PAGE_SIZE */
+
+	add	x14, x14, PAGE_SIZE
+	b	.Lnext
+
+.Ltest_indirection:
+	tbz	x18, IND_INDIRECTION_BIT, .Ltest_destination
+
+	/* ptr = addr */
+
+	mov	x15, x13
+	b	.Lnext
+
+.Ltest_destination:
+	tbz	x18, IND_DESTINATION_BIT, .Lnext
+
+	/* flush segment */
+
+	bl	.Lflush
+	mov	x16, x13
+
+	/* dest = addr */
+
+	mov	x14, x13
+
+.Lnext:
+	/* entry = *ptr++ */
+
+	ldr	x18, [x15], #8
+
+	/* while (!(entry & DONE)) */
+
+	tbz	x18, IND_DONE_BIT, .Lloop
+
+.Ldone:
+	/* flush last segment */
+
+	bl	.Lflush
+
+	dsb	sy
+	isb
+	ic	ialluis
+	dsb	sy
+	isb
+
+	/* start_new_kernel */
+
+	ldr	x4, arm64_kexec_kimage_start
+	ldr	x0, arm64_kexec_dtb_addr
+	mov	x1, xzr
+	mov	x2, xzr
+	mov	x3, xzr
+	br	x4
+
+/* flush - x17 = line size, x16 = start addr, x14 = end addr. */
+
+.Lflush:
+	cbz	x16, 2f
+	mov	x0, x16
+	sub	x1, x17, #1
+	bic	x0, x0, x1
+1:	dc	civac, x0
+	add	x0, x0, x17
+	cmp	x0, x14
+	b.lo	1b
+2:	ret
+
+.align 3	/* To keep the 64-bit values below naturally aligned. */
+
+/* The machine_kexec routines set these variables. */
+
+/*
+ * arm64_kexec_dtb_addr - Physical address of the new kernel's device tree.
+ */
+
+.globl arm64_kexec_dtb_addr
+arm64_kexec_dtb_addr:
+	.quad	0x0
+
+/*
+ * arm64_kexec_kimage_head - Copy of image->head, the list of kimage entries.
+ */
+
+.globl arm64_kexec_kimage_head
+arm64_kexec_kimage_head:
+	.quad	0x0
+
+/*
+ * arm64_kexec_kimage_start - Copy of image->start, the entry point of the new
+ * kernel.
+ */
+
+.globl arm64_kexec_kimage_start
+arm64_kexec_kimage_start:
+	.quad	0x0
+
+.Lrelocate_new_kernel_end:
+
+/*
+ * relocate_new_kernel_size - Number of bytes to copy to the control_code_page.
+ */
+
+.globl relocate_new_kernel_size
+relocate_new_kernel_size:
+	.quad	.Lrelocate_new_kernel_end - relocate_new_kernel
+
+.org	KEXEC_CONTROL_PAGE_SIZE
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 6925f5b..04626b9 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -39,6 +39,7 @@
 #define KEXEC_ARCH_SH      (42 << 16)
 #define KEXEC_ARCH_MIPS_LE (10 << 16)
 #define KEXEC_ARCH_MIPS    ( 8 << 16)
+#define KEXEC_ARCH_ARM64   (183 << 16)
 
 /* The artificial cap on the number of segments passed to kexec_load. */
 #define KEXEC_SEGMENT_MAX 16
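
For reference (not part of the patch): a small user-space sketch of the entry
encoding that kexec_list_flush() and relocate_new_kernel walk. The IND_* flag
values follow the IND_*_BIT positions defined in relocate_kernel.S above; the
page size, variable names, and the use of ordinary host pointers in place of
physical addresses are assumptions made so the walk can run on the host.

	/* Illustrative sketch of the kimage entry list format. */
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define IND_DESTINATION 0x1	/* entry = destination page | flag */
	#define IND_INDIRECTION 0x2	/* entry = next page of entries | flag */
	#define IND_DONE        0x4	/* entry = end of list */
	#define IND_SOURCE      0x8	/* entry = source page | flag */
	#define TOY_PAGE_SIZE   4096UL

	int main(void)
	{
		/* One destination page, two source pages, one page of entries. */
		void *dest0 = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
		void *src0 = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
		void *src1 = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
		uintptr_t *page = aligned_alloc(TOY_PAGE_SIZE, TOY_PAGE_SIZE);
		uintptr_t head, *entry;
		void *dest = NULL;

		if (!dest0 || !src0 || !src1 || !page)
			return 1;

		page[0] = (uintptr_t)dest0 | IND_DESTINATION;
		page[1] = (uintptr_t)src0 | IND_SOURCE;
		page[2] = (uintptr_t)src1 | IND_SOURCE;
		page[3] = IND_DONE;

		/* head is an IND_INDIRECTION entry pointing at the entry page. */
		head = (uintptr_t)page | IND_INDIRECTION;

		for (entry = &head; ; entry++) {
			/* Mask off the flag bits, as PAGE_MASK does in the kernel. */
			uintptr_t addr = *entry & ~(TOY_PAGE_SIZE - 1);

			if (*entry & IND_INDIRECTION) {
				printf("indirection -> %p\n", (void *)addr);
				entry = (uintptr_t *)addr - 1;	/* loop re-increments */
			} else if (*entry & IND_DESTINATION) {
				dest = (void *)addr;
				printf("destination  = %p\n", dest);
			} else if (*entry & IND_SOURCE) {
				printf("copy %p -> %p\n", (void *)addr, dest);
				dest = (char *)dest + TOY_PAGE_SIZE;
			} else if (*entry & IND_DONE) {
				printf("done\n");
				break;
			}
		}

		free(dest0);
		free(src0);
		free(src1);
		free(page);
		return 0;
	}

The same decode order explains why kexec_list_flush() can clean each source
and indirection page to the PoC before the MMU goes down: every page the
relocation loop will later touch appears in this list.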