@@ -21,5 +21,6 @@ extern char __exittext_begin[], __exittext_end[];
extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
extern char __entry_tramp_text_start[], __entry_tramp_text_end[];
+extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];
#endif /* __ASM_SECTIONS_H */
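The new pair follows the convention this header already uses for
__irqentry_text_start/__irqentry_text_end and friends: the linker script
defines symbols at the boundaries of the .kexec_relocate.text section, and C
code declares them as incomplete char arrays so only their addresses are ever
used. A minimal standalone sketch of the idiom (the helper name here is
hypothetical, not part of the patch):

	extern char __relocate_new_kernel_start[], __relocate_new_kernel_end[];

	/*
	 * The symbols carry no storage of their own; their addresses are
	 * fixed at link time, so the section size is plain pointer
	 * arithmetic.
	 */
	static inline long relocate_new_kernel_size(void)
	{
		return __relocate_new_kernel_end - __relocate_new_kernel_start;
	}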
--- a/arch/arm64/kernel/machine_kexec.c
+++ b/arch/arm64/kernel/machine_kexec.c
@@ -21,14 +21,11 @@
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
+#include <asm/sections.h>
#include <asm/trans_pgd.h>
#include "cpu-reset.h"
-/* Global variables for the arm64_relocate_new_kernel routine. */
-extern const unsigned char arm64_relocate_new_kernel[];
-extern const unsigned long arm64_relocate_new_kernel_size;
-
/**
* kexec_image_info - For debugging output.
*/
@@ -163,6 +160,7 @@ static void *kexec_page_alloc(void *arg)
int machine_kexec_post_load(struct kimage *kimage)
{
void *reloc_code = page_to_virt(kimage->control_code_page);
+ long reloc_size;
struct trans_pgd_info info = {
.trans_alloc_page = kexec_page_alloc,
.trans_alloc_arg = kimage,
@@ -183,17 +181,15 @@ int machine_kexec_post_load(struct kimage *kimage)
return rc;
}
- memcpy(reloc_code, arm64_relocate_new_kernel,
- arm64_relocate_new_kernel_size);
+ reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
+ memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
kimage->arch.kern_reloc = __pa(reloc_code);
/* Flush the reloc_code in preparation for its execution. */
dcache_clean_inval_poc((unsigned long)reloc_code,
- (unsigned long)reloc_code +
- arm64_relocate_new_kernel_size);
+ (unsigned long)reloc_code + reloc_size);
icache_inval_pou((uintptr_t)reloc_code,
- (uintptr_t)reloc_code +
- arm64_relocate_new_kernel_size);
+ (uintptr_t)reloc_code + reloc_size);
kexec_list_flush(kimage);
kexec_image_info(kimage);
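Only the source of the size changes here; the shape of the copy-and-flush
sequence stays the same. Because the arm64 I-side and D-side are not
coherent, the freshly copied instructions must be cleaned from the data
cache to the Point of Coherency (the relocation code later runs with the
MMU and caches off) and stale instruction-cache lines invalidated before
anything branches to kern_reloc. A condensed view of the resulting
sequence, with error handling and the trans_pgd setup omitted:

	void *dst = page_to_virt(kimage->control_code_page);
	long size = __relocate_new_kernel_end - __relocate_new_kernel_start;

	memcpy(dst, __relocate_new_kernel_start, size);
	kimage->arch.kern_reloc = __pa(dst);
	/* Clean the new code to the Point of Coherency, then drop any
	 * stale I-cache lines covering the same range. */
	dcache_clean_inval_poc((unsigned long)dst, (unsigned long)dst + size);
	icache_inval_pou((uintptr_t)dst, (uintptr_t)dst + size);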
--- a/arch/arm64/kernel/relocate_kernel.S
+++ b/arch/arm64/kernel/relocate_kernel.S
@@ -15,6 +15,7 @@
#include <asm/sysreg.h>
#include <asm/virt.h>
+.section ".kexec_relocate.text", "ax"
/*
* arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
*
@@ -77,16 +78,3 @@ SYM_CODE_START(arm64_relocate_new_kernel)
mov x3, xzr
br x4 /* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)
-
-.align 3 /* To keep the 64-bit values below naturally aligned. */
-
-.Lcopy_end:
-.org KEXEC_CONTROL_PAGE_SIZE
-
-/*
- * arm64_relocate_new_kernel_size - Number of bytes to copy to the
- * control_code_page.
- */
-.globl arm64_relocate_new_kernel_size
-arm64_relocate_new_kernel_size:
- .quad .Lcopy_end - arm64_relocate_new_kernel
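Moving arm64_relocate_new_kernel into its own "ax" (allocatable, executable)
section is what lets the linker script bracket it with
__relocate_new_kernel_start/end. The hand-maintained bookkeeping deleted
above then becomes redundant: the .align/.Lcopy_end/.quad block existed only
to export the routine's size to C, and .org KEXEC_CONTROL_PAGE_SIZE only to
make the assembler error out if the code outgrew the control page. Both jobs
move to link time: the size is now a symbol subtraction, and the
fits-in-one-page check is the ASSERT added at the end of this patch.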
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -63,6 +63,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/page.h>
@@ -100,6 +101,16 @@ jiffies = jiffies_64;
#define HIBERNATE_TEXT
#endif
+#ifdef CONFIG_KEXEC_CORE
+#define KEXEC_TEXT \
+ . = ALIGN(SZ_4K); \
+ __relocate_new_kernel_start = .; \
+ *(.kexec_relocate.text) \
+ __relocate_new_kernel_end = .;
+#else
+#define KEXEC_TEXT
+#endif
+
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
#define TRAMP_TEXT \
. = ALIGN(PAGE_SIZE); \
@@ -160,6 +171,7 @@ SECTIONS
HYPERVISOR_TEXT
IDMAP_TEXT
HIBERNATE_TEXT
+ KEXEC_TEXT
TRAMP_TEXT
*(.fixup)
*(.gnu.warning)
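KEXEC_TEXT follows the same pattern as the neighbouring HIBERNATE_TEXT and
TRAMP_TEXT macros: it expands to nothing when CONFIG_KEXEC_CORE is off, and
otherwise places the .kexec_relocate.text input section inside the kernel's
normal .text output section, so the relocation code is built and mapped like
any other kernel text. The ALIGN(SZ_4K) ahead of __relocate_new_kernel_start
is load-bearing: the size ASSERT below depends on it to guarantee the routine
occupies a single 4 KiB page.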
@@ -348,3 +360,10 @@ ASSERT(swapper_pg_dir - reserved_pg_dir == RESERVED_SWAPPER_OFFSET,
ASSERT(swapper_pg_dir - tramp_pg_dir == TRAMP_SWAPPER_OFFSET,
"TRAMP_SWAPPER_OFFSET is wrong!")
#endif
+
+#ifdef CONFIG_KEXEC_CORE
+/* kexec relocation code should fit into one KEXEC_CONTROL_PAGE_SIZE */
+ASSERT(__relocate_new_kernel_end - (__relocate_new_kernel_start & ~(SZ_4K - 1))
+ <= SZ_4K, "kexec relocation code is too big or misaligned")
+ASSERT(KEXEC_CONTROL_PAGE_SIZE >= SZ_4K, "KEXEC_CONTROL_PAGE_SIZE is broken")
+#endif
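The first ASSERT rounds __relocate_new_kernel_start down to its 4 KiB page
boundary and requires the end to lie no more than SZ_4K beyond that point,
i.e. the whole routine must fit inside one 4 KiB page. It trips either if
the code grows past a page or if the ALIGN(SZ_4K) in KEXEC_TEXT is ever
dropped. As a worked example with hypothetical numbers: a 0x2e0-byte routine
starting on a page boundary gives 0x2e0 <= 0x1000 and links, while the same
routine starting at offset 0xf80 into a page gives 0xf80 + 0x2e0 = 0x1260 >
0x1000 and fails the build. The second ASSERT, together with the
#include <asm/kexec.h> added at the top of the script to make
KEXEC_CONTROL_PAGE_SIZE visible, guarantees the control page that
machine_kexec_post_load() copies into is at least that one page.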