@@ -364,6 +364,13 @@ struct kimage {
size_t ima_buffer_size;
#endif
+#ifdef CONFIG_KEXEC_HANDOVER
+ struct {
+ struct kexec_segment *scratch;
+ struct kexec_segment *fdt;
+ } kho;
+#endif
+
/* Core ELF header buffer */
void *elf_headers;
unsigned long elf_headers_sz;
@@ -1053,6 +1053,10 @@ int kernel_kexec(void)
goto Unlock;
}
+ error = kho_copy_fdt(kexec_image);
+ if (error)
+ goto Unlock;
+
#ifdef CONFIG_KEXEC_JUMP
if (kexec_image->preserve_context) {
/*
@@ -253,6 +253,11 @@ kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
/* IMA needs to pass the measurement list to the next kernel. */
ima_add_kexec_buffer(image);
+ /* If KHO is active, add its images to the list */
+ ret = kho_fill_kimage(image);
+ if (ret)
+ goto out;
+
/* Call image load handler */
ldata = kexec_image_load_default(image);
@@ -636,6 +641,14 @@ int kexec_locate_mem_hole(struct kexec_buf *kbuf)
if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
return 0;
+ /*
+ * If KHO is active, only use KHO scratch memory. All other memory
+ * could potentially be handed over.
+ */
+ ret = kho_locate_mem_hole(kbuf, locate_mem_hole_callback);
+ if (ret <= 0)
+ return ret;
+
if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
else
@@ -764,6 +777,12 @@ static int kexec_calculate_store_digests(struct kimage *image)
if (ksegment->kbuf == pi->purgatory_buf)
continue;
+#ifdef CONFIG_KEXEC_HANDOVER
+ /* Skip KHO FDT as its contents are copied in kernel_kexec(). */
+ if (ksegment == image->kho.fdt)
+ continue;
+#endif
+
ret = crypto_shash_update(desc, ksegment->kbuf,
ksegment->bufsz);
if (ret)
@@ -245,6 +245,85 @@ int kho_node_check_compatible(const struct kho_in_node *node,
}
EXPORT_SYMBOL_GPL(kho_node_check_compatible);
+int kho_fill_kimage(struct kimage *image)
+{
+ ssize_t scratch_size;
+ int err = 0;
+
+ if (!kho_enable)
+ return 0;
+
+ /* Allocate target memory for KHO FDT */
+ struct kexec_buf fdt = {
+ .image = image,
+ .buffer = NULL,
+ .bufsz = 0,
+ .mem = KEXEC_BUF_MEM_UNKNOWN,
+ .memsz = kho_out.fdt_max,
+ .buf_align = SZ_64K, /* Makes it easier to map */
+ .buf_max = ULONG_MAX,
+ .top_down = true,
+ };
+ err = kexec_add_buffer(&fdt);
+ if (err) {
+ pr_err("failed to reserve a segment for KHO FDT: %d\n", err);
+ return err;
+ }
+ image->kho.fdt = &image->segment[image->nr_segments - 1];
+
+ scratch_size = sizeof(*kho_scratch) * kho_scratch_cnt;
+ struct kexec_buf scratch = {
+ .image = image,
+ .buffer = kho_scratch,
+ .bufsz = scratch_size,
+ .mem = KEXEC_BUF_MEM_UNKNOWN,
+ .memsz = scratch_size,
+ .buf_align = SZ_64K, /* Makes it easier to map */
+ .buf_max = ULONG_MAX,
+ .top_down = true,
+ };
+ err = kexec_add_buffer(&scratch);
+ if (err)
+ return err;
+ image->kho.scratch = &image->segment[image->nr_segments - 1];
+
+ return 0;
+}
+
+static int kho_walk_scratch(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < kho_scratch_cnt; i++) {
+ struct resource res = {
+ .start = kho_scratch[i].addr,
+ .end = kho_scratch[i].addr + kho_scratch[i].size - 1,
+ };
+
+ /* Try to fit the kimage into our KHO scratch region */
+ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+int kho_locate_mem_hole(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ int ret;
+
+ if (!kho_enable || kbuf->image->type == KEXEC_TYPE_CRASH)
+ return 1;
+
+ ret = kho_walk_scratch(kbuf, func);
+
+ return ret == 1 ? 0 : -EADDRNOTAVAIL;
+}
+
/*
* Keep track of memory that is to be preserved across KHO.
*
@@ -1141,6 +1220,35 @@ static int kho_finalize(void)
return err;
}
+int kho_copy_fdt(struct kimage *image)
+{
+ int err = 0;
+ void *fdt;
+
+ if (!kho_enable || !image->file_mode)
+ return 0;
+
+ if (!kho_out.fdt) {
+ err = kho_finalize();
+ kho_out_update_debugfs_fdt();
+ if (err)
+ return err;
+ }
+
+ fdt = kimage_map_segment(image, image->kho.fdt->mem,
+ PAGE_ALIGN(kho_out.fdt_max));
+ if (!fdt) {
+ pr_err("failed to vmap fdt ksegment in kimage\n");
+ return -ENOMEM;
+ }
+
+ memcpy(fdt, kho_out.fdt, fdt_totalsize(kho_out.fdt));
+
+ kimage_unmap_segment(fdt);
+
+ return 0;
+}
+
/* Handling for debug/kho/out */
static int kho_out_finalize_get(void *data, u64 *val)
{
@@ -39,4 +39,22 @@ extern size_t kexec_purgatory_size;
#else /* CONFIG_KEXEC_FILE */
static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
#endif /* CONFIG_KEXEC_FILE */
+
+struct kexec_buf;
+
+#ifdef CONFIG_KEXEC_HANDOVER
+int kho_locate_mem_hole(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *));
+int kho_fill_kimage(struct kimage *image);
+int kho_copy_fdt(struct kimage *image);
+#else
+static inline int kho_locate_mem_hole(struct kexec_buf *kbuf,
+ int (*func)(struct resource *, void *))
+{
+ return 1;
+}
+
+static inline int kho_fill_kimage(struct kimage *image) { return 0; }
+static inline int kho_copy_fdt(struct kimage *image) { return 0; }
+#endif /* CONFIG_KEXEC_HANDOVER */
#endif /* LINUX_KEXEC_INTERNAL_H */