diff --git a/include/linux/ima.h b/include/linux/ima.h
@@ -49,6 +49,9 @@ static inline void ima_appraise_parse_cmdline(void) {}
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
+extern void ima_kexec_post_load(struct kimage *image);
+#else
+static inline void ima_kexec_post_load(struct kimage *image) {}
#endif
#else
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
@@ -493,6 +493,15 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif
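+
+/*
+ * Walk a kimage entry list: an IND_DESTINATION entry selects the current
+ * destination page, each IND_SOURCE entry supplies a source page for it,
+ * IND_INDIRECTION chains to the next page of entries, and IND_DONE
+ * terminates the list.
+ */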
+#define for_each_kimage_entry(image, ptr, entry) \
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+ ptr = (entry & IND_INDIRECTION) ? \
+ boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
+
+extern void *kimage_map_segment(struct kimage *image,
+ unsigned long addr, unsigned long size);
+extern void kimage_unmap_segment(void *buffer);
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
@@ -500,6 +509,10 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
static inline void crash_kexec(struct pt_regs *regs) { }
static inline int kexec_should_crash(struct task_struct *p) { return 0; }
static inline int kexec_crash_loaded(void) { return 0; }
+static inline void *kimage_map_segment(struct kimage *image,
+ unsigned long addr, unsigned long size)
+{ return NULL; }
+static inline void kimage_unmap_segment(void *buffer) { }
#define kexec_in_progress false
#endif /* CONFIG_KEXEC_CORE */
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
@@ -594,11 +594,6 @@ void kimage_terminate(struct kimage *image)
*image->entry = IND_DONE;
}
-#define for_each_kimage_entry(image, ptr, entry) \
- for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
- ptr = (entry & IND_INDIRECTION) ? \
- boot_phys_to_virt((entry & PAGE_MASK)) : ptr + 1)
-
static void kimage_free_entry(kimage_entry_t entry)
{
struct page *page;
@@ -921,6 +916,60 @@ int kimage_load_segment(struct kimage *image,
return result;
}
+void *kimage_map_segment(struct kimage *image,
+ unsigned long addr, unsigned long size)
+{
+ unsigned long eaddr = addr + size;
+ unsigned long src_page_addr, dest_page_addr = 0;
+ unsigned int npages;
+ struct page **src_pages;
+ int i;
+ kimage_entry_t *ptr, entry;
+ void *vaddr = NULL;
+
+ /*
+ * Collect the source pages and map them in a contiguous VA range.
+ */
+ npages = PFN_UP(eaddr) - PFN_DOWN(addr);
+ src_pages = kmalloc_array(npages, sizeof(*src_pages), GFP_KERNEL);
+ if (!src_pages) {
+ pr_err("%s: Could not allocate ima pages array.\n", __func__);
+ return NULL;
+ }
+
+ i = 0;
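+ /*
+ * Track the current destination from IND_DESTINATION entries and
+ * collect the IND_SOURCE pages whose destination lies in
+ * [addr, eaddr).
+ */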
+ for_each_kimage_entry(image, ptr, entry) {
+ if (entry & IND_DESTINATION) {
+ dest_page_addr = entry & PAGE_MASK;
+ } else if (entry & IND_SOURCE) {
+ if (dest_page_addr >= addr && dest_page_addr < eaddr) {
+ src_page_addr = entry & PAGE_MASK;
+ src_pages[i++] =
+ virt_to_page(__va(src_page_addr));
+ if (i == npages)
+ break;
+ dest_page_addr += PAGE_SIZE;
+ }
+ }
+ }
+
+ /* Sanity check: do not vmap() uninitialized array entries. */
+ if (WARN_ON(i < npages)) {
+ kfree(src_pages);
+ return NULL;
+ }
+
+ vaddr = vmap(src_pages, npages, VM_MAP, PAGE_KERNEL);
+ kfree(src_pages);
+
+ if (!vaddr)
+ pr_err("%s: Could not map imap buffer.\n", __func__);
+
+ return vaddr;
+}
+
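+/**
+ * kimage_unmap_segment - unmap a segment mapped via kimage_map_segment()
+ * @segment_buffer: address returned by kimage_map_segment()
+ */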
+void kimage_unmap_segment(void *segment_buffer)
+{
+ vunmap(segment_buffer);
+}
+
struct kexec_load_limit {
/* Mutex protects the limit count. */
struct mutex mutex;
diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
@@ -12,12 +12,15 @@
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/ima.h>
+#include <linux/reboot.h>
+#include <asm/page.h>
#include "ima.h"
#ifdef CONFIG_IMA_KEXEC
struct seq_file ima_kexec_file;
static void *ima_kexec_buffer;
static size_t kexec_segment_size;
+static bool ima_kexec_update_registered;
void ima_free_kexec_file_buf(struct seq_file *sf)
{
@@ -201,6 +204,7 @@ static int ima_update_kexec_buffer(struct notifier_block *self,
}
memcpy(ima_kexec_buffer, buf, buf_size);
out:
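+ /* Drop the mapping created in ima_kexec_post_load(). */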
+ kimage_unmap_segment(ima_kexec_buffer);
ima_kexec_buffer = NULL;
if (resume)
@@ -213,6 +217,34 @@ struct notifier_block update_buffer_nb = {
.notifier_call = ima_update_kexec_buffer,
};
+/*
+ * Create a mapping for the source pages that contain the IMA buffer
+ * so we can update it later.
+ */
+void ima_kexec_post_load(struct kimage *image)
+{
+ if (ima_kexec_buffer) {
+ kimage_unmap_segment(ima_kexec_buffer);
+ ima_kexec_buffer = NULL;
+ }
+
+ if (!image->ima_buffer_addr)
+ return;
+
+ ima_kexec_buffer = kimage_map_segment(image,
+ image->ima_buffer_addr,
+ image->ima_buffer_size);
+ if (!ima_kexec_buffer) {
+ pr_err("%s: Could not map measurements buffer.\n", __func__);
+ return;
+ }
+
+ if (!ima_kexec_update_registered) {
+ register_reboot_notifier(&update_buffer_nb);
+ ima_kexec_update_registered = true;
+ }
+}
+
#endif /* IMA_KEXEC */
/*
Implement kimage_map_segment() to enable mapping of IMA buffer source
pages to the kimage structure after the kexec 'load'. Given a kimage
pointer, an address, and a size, the function gathers the source pages
that fall within the specified destination address range, builds an
array of page pointers, and maps them into a contiguous virtual address
range. It returns the start of that range on success, or NULL on
failure.

Additionally, introduce kimage_unmap_segment() to unmap such a segment
using vunmap().

Introduce ima_kexec_post_load(), to be invoked by IMA following the
kexec 'load' of the new kernel image. It maps the IMA buffer, allocated
during kexec 'load', to the corresponding segment in the loaded image.

Lastly, relocate the for_each_kimage_entry() macro from kexec_core.c to
kexec.h for broader accessibility.

Signed-off-by: Tushar Sugandhi <tusharsu@linux.microsoft.com>
---
 include/linux/ima.h                |  3 ++
 include/linux/kexec.h              | 13 +++++++
 kernel/kexec_core.c                | 59 +++++++++++++++++++++++++++---
 security/integrity/ima/ima_kexec.c | 32 ++++++++++++++++
 4 files changed, 102 insertions(+), 5 deletions(-)
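
For reviewers, a sketch of the intended call flow. The kexec 'load' path
is expected to invoke ima_kexec_post_load() once the new image's segments
have been loaded; the call site shown below is illustrative only and is
not part of this patch (the actual hook-up lands in a follow-up patch in
this series):

	/*
	 * Illustrative call site: once the entry list is terminated,
	 * let IMA map its measurement buffer inside the loaded image
	 * so the reboot notifier registered in ima_kexec_post_load()
	 * can refresh it right before the 'execute'.
	 */
	kimage_terminate(image);
	ima_kexec_post_load(image);

The map/unmap helpers are then used as a pair across the kexec window:
the buffer at image->ima_buffer_addr is mapped at post-load time, and
unmapped from the reboot notifier after the final memcpy() of the
measurement list.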