@@ -228,6 +228,28 @@ static void jit_fill_hole(void *area, unsigned int size)
memset(area, 0xcc, size);
}
+#define INVALID_BUF_SIZE PAGE_SIZE
+static char invalid_insn_buf[INVALID_BUF_SIZE];
+
+static int __init bpf_init_invalid_insn_buf(void)
+{
+ jit_fill_hole(invalid_insn_buf, INVALID_BUF_SIZE);
+ return 0;
+}
+pure_initcall(bpf_init_invalid_insn_buf);
+
+void bpf_arch_invalidate_text(void *dst, size_t len)
+{
+ size_t i = 0;
+
+ while (i < len) {
+ size_t s = min_t(size_t, len - i, INVALID_BUF_SIZE);
+
+ bpf_arch_text_copy(dst + i, invalid_insn_buf, s);
+ i += s;
+ }
+}
+
struct jit_context {
int cleanup_addr; /* Epilogue code offset */
@@ -2382,6 +2382,8 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *bpf_arch_text_copy(void *dst, void *src, size_t len);
+void bpf_arch_invalidate_text(void *dst, size_t len);
+
struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
@@ -873,7 +873,7 @@ static size_t select_bpf_prog_pack_size(void)
return size;
}
-static struct bpf_prog_pack *alloc_new_pack(void)
+static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
struct bpf_prog_pack *pack;
@@ -886,6 +886,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
kfree(pack);
return NULL;
}
+ bpf_fill_ill_insns(pack->ptr, bpf_prog_pack_size);
bitmap_zero(pack->bitmap, bpf_prog_pack_size / BPF_PROG_CHUNK_SIZE);
list_add_tail(&pack->list, &pack_list);
@@ -895,7 +896,7 @@ static struct bpf_prog_pack *alloc_new_pack(void)
return pack;
}
-static void *bpf_prog_pack_alloc(u32 size)
+static void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns)
{
unsigned int nbits = BPF_PROG_SIZE_TO_NBITS(size);
struct bpf_prog_pack *pack;
@@ -910,6 +911,7 @@ static void *bpf_prog_pack_alloc(u32 size)
size = round_up(size, PAGE_SIZE);
ptr = module_alloc(size);
if (ptr) {
+ bpf_fill_ill_insns(ptr, size);
set_vm_flush_reset_perms(ptr);
set_memory_ro((unsigned long)ptr, size / PAGE_SIZE);
set_memory_x((unsigned long)ptr, size / PAGE_SIZE);
@@ -923,7 +925,7 @@ static void *bpf_prog_pack_alloc(u32 size)
goto found_free_area;
}
- pack = alloc_new_pack();
+ pack = alloc_new_pack(bpf_fill_ill_insns);
if (!pack)
goto out;
@@ -966,6 +968,7 @@ static void bpf_prog_pack_free(struct bpf_binary_header *hdr)
nbits = BPF_PROG_SIZE_TO_NBITS(hdr->size);
pos = ((unsigned long)hdr - (unsigned long)pack_ptr) >> BPF_PROG_CHUNK_SHIFT;
+ bpf_arch_invalidate_text(hdr, hdr->size);
bitmap_clear(pack->bitmap, pos, nbits);
if (bitmap_find_next_zero_area(pack->bitmap, bpf_prog_chunk_count(), 0,
bpf_prog_chunk_count(), 0) == 0) {
@@ -1102,7 +1105,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr,
if (bpf_jit_charge_modmem(size))
return NULL;
- ro_header = bpf_prog_pack_alloc(size);
+ ro_header = bpf_prog_pack_alloc(size, bpf_fill_ill_insns);
if (!ro_header) {
bpf_jit_uncharge_modmem(size);
return NULL;
@@ -1203,6 +1206,16 @@ void __weak bpf_jit_free(struct bpf_prog *fp)
bpf_prog_unlock_free(fp);
}
+void __weak bpf_arch_invalidate_text(void *dst, size_t len)
+{
+ char buf[1] = {};
+ size_t i;
+
+ WARN_ONCE(1, "Please override %s for bpf_prog_pack\n", __func__);
+ for (i = 0; i < len; i++)
+ bpf_arch_text_copy(dst + i, buf, 1);
+}
+
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed)
bpf_prog_pack enables sharing huge pages among multiple BPF programs. These pages are marked as executable, but some parts of a huge page may not contain proper BPF programs. To make the pages safe, fill the unused parts with invalid instructions. This is done when a pack is first allocated and when a BPF program is freed.

Fixes: 57631054fae6 ("bpf: Introduce bpf_prog_pack allocator")
Fixes: 33c9805860e5 ("bpf: Introduce bpf_jit_binary_pack_[alloc|finalize|free]")
Signed-off-by: Song Liu <song@kernel.org>
---
 arch/x86/net/bpf_jit_comp.c | 22 ++++++++++++++++++++++
 include/linux/bpf.h         |  2 ++
 kernel/bpf/core.c           | 21 +++++++++++++++++----
 3 files changed, 41 insertions(+), 4 deletions(-)
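A note on the mechanism, since it is easy to miss in the diff: bpf_arch_invalidate_text() is a chunked copy from a page-sized buffer that was prefilled once at boot with the architecture's trap opcode (0xcc, i.e. int3, on x86). Below is a minimal user-space sketch of the same pattern, for illustration only: memcpy() stands in for bpf_arch_text_copy(), which in the kernel must go through the text poking machinery because the destination pages are read-only and executable, and FILL_BUF_SIZE plus the function names here are invented for the example.

/* User-space sketch of the chunked invalidation pattern above. */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define FILL_BUF_SIZE 4096		/* stand-in for PAGE_SIZE */

static unsigned char fill_buf[FILL_BUF_SIZE];

/* One-time setup, analogous to bpf_init_invalid_insn_buf(). */
static void init_fill_buf(void)
{
	memset(fill_buf, 0xcc, sizeof(fill_buf));	/* 0xcc = x86 int3 */
}

/* Cover [dst, dst + len) chunk by chunk, analogous to
 * bpf_arch_invalidate_text(). memcpy() stands in for
 * bpf_arch_text_copy().
 */
static void invalidate_text(void *dst, size_t len)
{
	size_t i = 0;

	while (i < len) {
		size_t s = len - i < FILL_BUF_SIZE ? len - i : FILL_BUF_SIZE;

		memcpy((unsigned char *)dst + i, fill_buf, s);
		i += s;
	}
}

int main(void)
{
	unsigned char region[3 * FILL_BUF_SIZE + 123];	/* deliberately not page sized */

	init_fill_buf();
	invalidate_text(region, sizeof(region));
	printf("first=0x%02x last=0x%02x\n",
	       region[0], region[sizeof(region) - 1]);	/* both 0xcc */
	return 0;
}

The page-sized prefilled source buffer keeps the cost at one bpf_arch_text_copy() call per page rather than one per byte. That is also why the __weak fallback only warns and copies single zero bytes: it is slow, and a zero byte is not a trapping instruction on every architecture, so architectures that use bpf_prog_pack are expected to provide a real implementation.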