--- a/arch/arm64/include/asm/ktext.h
+++ b/arch/arm64/include/asm/ktext.h
@@ -5,9 +5,13 @@
#ifndef ASM_KTEXT_H
#define ASM_KTEXT_H
+#include <linux/kprobes.h>
+
#ifdef CONFIG_REPLICATE_KTEXT
void ktext_replication_init(void);
+void __kprobes ktext_replication_patch(u32 *tp, __le32 insn);
+void ktext_replication_patch_alternative(__le32 *src, int nr_inst);
#else
@@ -15,6 +19,14 @@ static inline void ktext_replication_init(void)
{
}
+static inline void __kprobes ktext_replication_patch(u32 *tp, __le32 insn)
+{
+}
+
+static inline void ktext_replication_patch_alternative(__le32 *src, int nr_inst)
+{
+}
+
#endif
#endif
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -15,6 +15,7 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
+#include <asm/ktext.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
@@ -174,6 +175,7 @@ static void __apply_alternatives(const struct alt_region *region,
alt_cb(alt, origptr, updptr, nr_inst);
if (!is_module) {
+ ktext_replication_patch_alternative(updptr, nr_inst);
clean_dcache_range_nopatch((u64)origptr,
(u64)(origptr + nr_inst));
}
--- a/arch/arm64/kernel/patching.c
+++ b/arch/arm64/kernel/patching.c
@@ -10,6 +10,7 @@
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
+#include <asm/ktext.h>
#include <asm/patching.h>
#include <asm/sections.h>
@@ -115,9 +116,13 @@ int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
return -EINVAL;
ret = aarch64_insn_write(tp, insn);
- if (ret == 0)
+ if (ret == 0) {
+ /* Also patch the other nodes */
+ ktext_replication_patch(tp, cpu_to_le32(insn));
+
caches_clean_inval_pou((uintptr_t)tp,
(uintptr_t)tp + AARCH64_INSN_SIZE);
+ }
return ret;
}
--- a/arch/arm64/mm/ktext.c
+++ b/arch/arm64/mm/ktext.c
@@ -3,8 +3,10 @@
* Copyright (C) 2022, Oracle and/or its affiliates.
*/
+#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
+#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/string.h>
@@ -15,6 +17,62 @@
static void *kernel_texts[MAX_NUMNODES];
+void __kprobes ktext_replication_patch(u32 *tp, __le32 insn)
+{
+ unsigned long offset;
+ int nid, this_nid;
+ __le32 *p;
+
+ if (!is_kernel_text((unsigned long)tp))
+ return;
+
+ offset = (unsigned long)tp - (unsigned long)_stext;
+
+ this_nid = numa_node_id();
+ if (this_nid) {
+ /* The cache maintenance by aarch64_insn_patch_text_nosync()
+ * will occur on this node. We need it to occur on node 0.
+ */
+ p = (void *)lm_alias(_stext) + offset;
+ caches_clean_inval_pou((u64)p, (u64)p + AARCH64_INSN_SIZE);
+ }
+
+ for_each_node(nid) {
+ if (!kernel_texts[nid])
+ continue;
+
+ p = kernel_texts[nid] + offset;
+ WRITE_ONCE(*p, insn);
+ caches_clean_inval_pou((u64)p, (u64)p + AARCH64_INSN_SIZE);
+ }
+}
+
+/* Copy the patched alternative from the node 0 image to the other
+ * nodes. src is the node 0 linear-mapping address.
+ */
+void ktext_replication_patch_alternative(__le32 *src, int nr_inst)
+{
+ unsigned long offset;
+ size_t size;
+ int nid;
+ __le32 *p;
+
+ offset = (unsigned long)src - (unsigned long)lm_alias(_stext);
+ if (offset >= _etext - _stext)
+ return;
+
+ size = AARCH64_INSN_SIZE * nr_inst;
+
+ for_each_node(nid) {
+ if (!kernel_texts[nid])
+ continue;
+
+ p = kernel_texts[nid] + offset;
+ memcpy(p, src, size);
+ clean_dcache_range_nopatch((u64)p, (u64)p + size);
+ }
+}
+
/* Allocate memory for the replicated kernel texts. */
void __init ktext_replication_init(void)
{
Add support for text patching on our replicated texts.

Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
---
 arch/arm64/include/asm/ktext.h  | 12 +++++++
 arch/arm64/kernel/alternative.c |  2 ++
 arch/arm64/kernel/patching.c    |  7 +++-
 arch/arm64/mm/ktext.c           | 58 +++++++++++++++++++++++++++++++++
 4 files changed, 78 insertions(+), 1 deletion(-)
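
For anyone new to the text replication series, here is a small, userspace-only
sketch (not part of the patch) of the offset-based update that
ktext_replication_patch() performs: the byte offset of the patched instruction
from the start of the primary text is computed once, and the same write is then
replayed at that offset in every per-node copy. All names below (fake_stext,
replicas, patch_insn, NR_NODES, TEXT_SIZE) are local stand-ins rather than
kernel symbols, and the cache maintenance is reduced to a comment.

/*
 * Illustrative sketch only: replicate an instruction write into several
 * per-node copies of a text image by reusing the byte offset from the
 * start of the primary image.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_NODES	4
#define TEXT_SIZE	64		/* pretend kernel text, in bytes */
#define INSN_SIZE	4		/* AArch64 instructions are 4 bytes */

static uint8_t fake_stext[TEXT_SIZE];	/* stands in for the node 0 text */
static uint8_t *replicas[NR_NODES];	/* per-node copies; [0] unused */

static void patch_insn(uint32_t *tp, uint32_t insn)
{
	size_t offset = (uint8_t *)tp - fake_stext;
	int nid;

	/* Write the primary copy first, as aarch64_insn_write() would. */
	memcpy(tp, &insn, INSN_SIZE);

	/* Then replay the write at the same offset in every replica. */
	for (nid = 1; nid < NR_NODES; nid++) {
		if (!replicas[nid])
			continue;
		memcpy(replicas[nid] + offset, &insn, INSN_SIZE);
		/* the kernel would clean/invalidate caches here */
	}
}

int main(void)
{
	uint32_t check;
	int nid;

	for (nid = 1; nid < NR_NODES; nid++) {
		replicas[nid] = malloc(TEXT_SIZE);
		if (!replicas[nid])
			return 1;
		memcpy(replicas[nid], fake_stext, TEXT_SIZE);
	}

	/* Patch the second "instruction" and check one replica followed. */
	patch_insn((uint32_t *)(fake_stext + INSN_SIZE), 0xd503201f /* NOP */);

	memcpy(&check, replicas[1] + INSN_SIZE, INSN_SIZE);
	printf("replica[1] word 1: 0x%08x\n", (unsigned int)check);

	for (nid = 1; nid < NR_NODES; nid++)
		free(replicas[nid]);
	return 0;
}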