@@ -13,9 +13,12 @@
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
-#include <asm/kprobes.h>
+#include <linux/memory.h>
#include <linux/bpf.h>
+#include <asm/kprobes.h>
+#include <asm/code-patching.h>
+
#include "bpf_jit.h"
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
@@ -272,3 +275,21 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct code
ctx->exentry_idx++;
return 0;
}
+
+void *bpf_arch_text_copy(void *dst, void *src, size_t len)
+{
+ void *ret = ERR_PTR(-EINVAL);
+ int err;
+
+ if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
+ return ret;
+
+ ret = dst;
+ mutex_lock(&text_mutex);
+ err = patch_instructions(dst, src, false, len);
+ if (err)
+ ret = ERR_PTR(err);
+ mutex_unlock(&text_mutex);
+
+ return ret;
+}
bpf_arch_text_copy() is used to dump the JITed binary to an RX page,
allowing multiple BPF programs to share the same page. Use the newly
introduced patch_instructions() function to implement it. Around 5X
improvement in speed of execution was observed with the new
patch_instructions() function over patch_instruction(), while running
the tests from test_bpf.ko.

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp.c | 23 ++++++++++++++++++++++-
 1 file changed, 22 insertions(+), 1 deletion(-)