@@ -16,6 +16,7 @@ void __kprobes ktext_replication_patch(u32 *tp, __le32 insn);
void ktext_replication_patch_alternative(__le32 *src, int nr_inst);
void ktext_replication_set_swapper_pgd(pgd_t *pgdp, pgd_t pgd);
void ktext_replication_init_tramp(void);
+void create_kernel_nid_map(pgd_t *pgdp, void *ktext);
#else
@@ -136,6 +136,14 @@ void __init ktext_replication_init(void)
/* Copy initial swapper page directory */
memcpy(pgtables[nid]->swapper_pg_dir, swapper_pg_dir, PGD_SIZE);
+
+ /* Clear the kernel mapping */
+ memset(&pgtables[nid]->swapper_pg_dir[kidx], 0,
+ sizeof(pgtables[nid]->swapper_pg_dir[kidx]));
+
+ /* Create kernel mapping pointing at our local copy */
+ create_kernel_nid_map(pgtables[nid]->swapper_pg_dir,
+ kernel_texts[nid]);
}
}
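
For readers following the hunk above: the per-node work reduces to three steps. The sketch below is illustrative only; node_pgd, kidx and node_text are stand-ins for the pgtables[nid]->swapper_pg_dir, kernel PGD index and kernel_texts[nid] values that ktext.c obtains from context not shown in this hunk, and it assumes the headers ktext.c already includes.

	/*
	 * Illustrative sketch of the per-node setup done in
	 * ktext_replication_init(); the parameter names are placeholders,
	 * not the identifiers used by the real code.
	 */
	static void __init ktext_setup_node_pgd(pgd_t *node_pgd,
						unsigned int kidx,
						void *node_text)
	{
		/* Start from the boot CPU's page directory... */
		memcpy(node_pgd, swapper_pg_dir, PGD_SIZE);

		/* ...invalidate the entry covering the kernel mapping... */
		memset(&node_pgd[kidx], 0, sizeof(node_pgd[kidx]));

		/* ...and rebuild it pointing at this node's copy of the text. */
		create_kernel_nid_map(node_pgd, node_text);
	}
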
@@ -641,6 +641,16 @@ void mark_rodata_ro(void)
debug_checkwx();
}
+static void __init create_kernel_mapping(pgd_t *pgdp, phys_addr_t pa_start,
+ void *va_start, void *va_end,
+ pgprot_t prot, int flags)
+{
+ size_t size = va_end - va_start;
+
+ __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size,
+ prot, early_pgtable_alloc, flags);
+}
+
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
pgprot_t prot, struct vm_struct *vma,
int flags, unsigned long vm_flags)
@@ -651,8 +661,7 @@ static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
BUG_ON(!PAGE_ALIGNED(pa_start));
BUG_ON(!PAGE_ALIGNED(size));
- __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
- early_pgtable_alloc, flags);
+ create_kernel_mapping(pgdp, pa_start, va_start, va_end, prot, flags);
if (!(vm_flags & VM_NO_GUARD))
size += PAGE_SIZE;
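
Note that the map_kernel_segment() change here is behaviour-preserving: create_kernel_mapping() only folds the size calculation and the early_pgtable_alloc argument. Using the symbols already visible in this patch, the two forms below are equivalent (sketch, not part of the patch):

	/* open-coded form, as removed above */
	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start,
			     va_end - va_start, prot, early_pgtable_alloc,
			     flags);

	/* new helper, shared with create_kernel_nid_map() */
	create_kernel_mapping(pgdp, pa_start, va_start, va_end, prot, flags);
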
@@ -721,14 +730,8 @@ static bool arm64_early_this_cpu_has_bti(void)
ID_AA64PFR1_EL1_BT_SHIFT);
}
-/*
- * Create fine-grained mappings for the kernel.
- */
-static void __init map_kernel(pgd_t *pgdp)
+static pgprot_t __init kernel_text_pgprot(void)
{
- static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
- vmlinux_initdata, vmlinux_data;
-
/*
* External debuggers may need to write directly to the text
* mapping to install SW breakpoints. Allow this (only) when
@@ -744,6 +747,38 @@ static void __init map_kernel(pgd_t *pgdp)
if (arm64_early_this_cpu_has_bti())
text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
+ return text_prot;
+}
+
+#ifdef CONFIG_REPLICATE_KTEXT
+void __init create_kernel_nid_map(pgd_t *pgdp, void *ktext)
+{
+ pgprot_t text_prot = kernel_text_pgprot();
+
+ create_kernel_mapping(pgdp, __pa(ktext), _stext, _etext, text_prot, 0);
+ create_kernel_mapping(pgdp, __pa_symbol(__start_rodata),
+ __start_rodata, __inittext_begin,
+ PAGE_KERNEL, NO_CONT_MAPPINGS);
+ create_kernel_mapping(pgdp, __pa_symbol(__inittext_begin),
+ __inittext_begin, __inittext_end,
+ text_prot, 0);
+ create_kernel_mapping(pgdp, __pa_symbol(__initdata_begin),
+ __initdata_begin, __initdata_end,
+ PAGE_KERNEL, 0);
+ create_kernel_mapping(pgdp, __pa_symbol(_data), _data, _end,
+ PAGE_KERNEL, 0);
+}
+#endif
+
+/*
+ * Create fine-grained mappings for the kernel.
+ */
+static void __init map_kernel(pgd_t *pgdp)
+{
+ static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
+ vmlinux_initdata, vmlinux_data;
+ pgprot_t text_prot = kernel_text_pgprot();
+
/*
* Only rodata will be remapped with different permissions later on,
* all other segments are allowed to use contiguous mappings.
Set up page table entries in each non-boot NUMA node's page table to
point at that node's own copy of the kernel text. This switches each
node over to its own unique copy of the kernel text.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
---
 arch/arm64/include/asm/ktext.h |  1 +
 arch/arm64/mm/ktext.c          |  8 +++++
 arch/arm64/mm/mmu.c            | 53 ++++++++++++++++++++++++++++------
 3 files changed, 53 insertions(+), 9 deletions(-)
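
As a reading aid (not part of the patch): create_kernel_nid_map() redirects only the text segment to the node-local copy; the remaining segments are still mapped from the original kernel image, so the per-node tables continue to share rodata and data. A sketch of the resulting layout, using the section symbols from the mmu.c hunk:

	/*
	 * Per-node kernel mapping built by create_kernel_nid_map():
	 *
	 *   [_stext, _etext)                   -> __pa(ktext)                   text_prot
	 *   [__start_rodata, __inittext_begin) -> __pa_symbol(__start_rodata)   PAGE_KERNEL, NO_CONT_MAPPINGS
	 *   [__inittext_begin, __inittext_end) -> __pa_symbol(__inittext_begin) text_prot
	 *   [__initdata_begin, __initdata_end) -> __pa_symbol(__initdata_begin) PAGE_KERNEL
	 *   [_data, _end)                      -> __pa_symbol(_data)            PAGE_KERNEL
	 *
	 * Only the first range points at the node-local text copy; the
	 * others alias the boot kernel's physical pages.
	 */

The NO_CONT_MAPPINGS flag on the rodata range mirrors the comment retained in map_kernel(): only rodata is remapped with different permissions later on, so it must not use contiguous mappings.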