Message ID | 20230825151810.164418-3-hbathini@linux.ibm.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Headers | show |
Series | powerpc/bpf: use BPF prog pack allocator | expand |
Context | Check | Description |
---|---|---|
bpf/vmtest-bpf-next-PR | pending | PR summary |
bpf/vmtest-bpf-next-VM_Test-0 | success | Logs for ShellCheck |
bpf/vmtest-bpf-next-VM_Test-5 | success | Logs for set-matrix |
bpf/vmtest-bpf-next-VM_Test-1 | success | Logs for build for aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-3 | success | Logs for build for x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-4 | success | Logs for build for x86_64 with llvm-16 |
netdev/tree_selection | success | Not a local patch, async |
bpf/vmtest-bpf-next-VM_Test-19 | success | Logs for test_progs_no_alu32_parallel on x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-2 | success | Logs for build for s390x with gcc |
bpf/vmtest-bpf-next-VM_Test-6 | success | Logs for test_maps on aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-7 | pending | Logs for test_maps on s390x with gcc |
bpf/vmtest-bpf-next-VM_Test-8 | success | Logs for test_maps on x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-18 | success | Logs for test_progs_no_alu32_parallel on aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-9 | success | Logs for test_maps on x86_64 with llvm-16 |
bpf/vmtest-bpf-next-VM_Test-16 | success | Logs for test_progs_no_alu32 on x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-17 | success | Logs for test_progs_no_alu32 on x86_64 with llvm-16 |
bpf/vmtest-bpf-next-VM_Test-20 | success | Logs for test_progs_no_alu32_parallel on x86_64 with llvm-16 |
bpf/vmtest-bpf-next-VM_Test-21 | success | Logs for test_progs_parallel on aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-22 | success | Logs for test_progs_parallel on x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-23 | success | Logs for test_progs_parallel on x86_64 with llvm-16 |
bpf/vmtest-bpf-next-VM_Test-24 | success | Logs for test_verifier on aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-26 | success | Logs for test_verifier on x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-27 | success | Logs for test_verifier on x86_64 with llvm-16 |
bpf/vmtest-bpf-next-VM_Test-28 | success | Logs for veristat |
bpf/vmtest-bpf-next-VM_Test-12 | success | Logs for test_progs on x86_64 with gcc |
bpf/vmtest-bpf-next-VM_Test-14 | success | Logs for test_progs_no_alu32 on aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-10 | success | Logs for test_progs on aarch64 with gcc |
bpf/vmtest-bpf-next-VM_Test-13 | success | Logs for test_progs on x86_64 with llvm-16 |
bpf/vmtest-bpf-next-VM_Test-25 | success | Logs for test_verifier on s390x with gcc |
bpf/vmtest-bpf-next-VM_Test-15 | success | Logs for test_progs_no_alu32 on s390x with gcc |
bpf/vmtest-bpf-next-VM_Test-11 | success | Logs for test_progs on s390x with gcc |
Le 25/08/2023 à 17:18, Hari Bathini a écrit : > Implement bpf_arch_text_invalidate and use it to fill unused part of > the bpf_prog_pack with trap instructions when a BPF program is freed. > > Signed-off-by: Hari Bathini <hbathini@linux.ibm.com> > --- > arch/powerpc/net/bpf_jit_comp.c | 22 +++++++++++++++++++--- > 1 file changed, 19 insertions(+), 3 deletions(-) > > diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c > index 170ebf8ac0f2..7cd4cf53d61c 100644 > --- a/arch/powerpc/net/bpf_jit_comp.c > +++ b/arch/powerpc/net/bpf_jit_comp.c > @@ -30,7 +30,7 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size) > * Patch 'len' bytes of instructions from opcode to addr, one instruction > * at a time. Returns addr on success. ERR_PTR(-EINVAL), otherwise. > */ > -static void *bpf_patch_instructions(void *addr, void *opcode, size_t len) > +static void *bpf_patch_instructions(void *addr, void *opcode, size_t len, bool fill_insn) It's a pity that you have to modify in patch 2 a function you have added in patch 1 of the same series. Can't you have it right from the beginning ? 
> { > while (len > 0) { > ppc_inst_t insn = ppc_inst_read(opcode); > @@ -41,7 +41,8 @@ static void *bpf_patch_instructions(void *addr, void *opcode, size_t len) > > len -= ilen; > addr = addr + ilen; > - opcode = opcode + ilen; > + if (!fill_insn) > + opcode = opcode + ilen; > } > > return addr; > @@ -307,7 +308,22 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len) > return ERR_PTR(-EINVAL); > > mutex_lock(&text_mutex); > - ret = bpf_patch_instructions(dst, src, len); > + ret = bpf_patch_instructions(dst, src, len, false); > + mutex_unlock(&text_mutex); > + > + return ret; > +} > + > +int bpf_arch_text_invalidate(void *dst, size_t len) > +{ > + u32 insn = BREAKPOINT_INSTRUCTION; > + int ret; > + > + if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst))) > + return -EINVAL; > + > + mutex_lock(&text_mutex); > + ret = IS_ERR(bpf_patch_instructions(dst, &insn, len, true)); Why IS_ERR ? As far as I understand from the weak definition in kernel/bpf/core.c, this function is supposed to return an error, not a bool. > mutex_unlock(&text_mutex); > > return ret;
On 25/08/23 9:03 pm, Christophe Leroy wrote: > > > Le 25/08/2023 à 17:18, Hari Bathini a écrit : >> Implement bpf_arch_text_invalidate and use it to fill unused part of >> the bpf_prog_pack with trap instructions when a BPF program is freed. >> >> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com> >> --- >> arch/powerpc/net/bpf_jit_comp.c | 22 +++++++++++++++++++--- >> 1 file changed, 19 insertions(+), 3 deletions(-) >> >> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c >> index 170ebf8ac0f2..7cd4cf53d61c 100644 >> --- a/arch/powerpc/net/bpf_jit_comp.c >> +++ b/arch/powerpc/net/bpf_jit_comp.c >> @@ -30,7 +30,7 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size) >> * Patch 'len' bytes of instructions from opcode to addr, one instruction >> * at a time. Returns addr on success. ERR_PTR(-EINVAL), otherwise. >> */ >> -static void *bpf_patch_instructions(void *addr, void *opcode, size_t len) >> +static void *bpf_patch_instructions(void *addr, void *opcode, size_t len, bool fill_insn) > > It's a pity that you have to modify in patch 2 a function you have > added in patch 1 of the same series. Can't you have it right from the > beginning ? 
> >> { >> while (len > 0) { >> ppc_inst_t insn = ppc_inst_read(opcode); >> @@ -41,7 +41,8 @@ static void *bpf_patch_instructions(void *addr, void *opcode, size_t len) >> >> len -= ilen; >> addr = addr + ilen; >> - opcode = opcode + ilen; >> + if (!fill_insn) >> + opcode = opcode + ilen; >> } >> >> return addr; >> @@ -307,7 +308,22 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len) >> return ERR_PTR(-EINVAL); >> >> mutex_lock(&text_mutex); >> - ret = bpf_patch_instructions(dst, src, len); >> + ret = bpf_patch_instructions(dst, src, len, false); >> + mutex_unlock(&text_mutex); >> + >> + return ret; >> +} >> + >> +int bpf_arch_text_invalidate(void *dst, size_t len) >> +{ >> + u32 insn = BREAKPOINT_INSTRUCTION; >> + int ret; >> + >> + if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst))) >> + return -EINVAL; >> + >> + mutex_lock(&text_mutex); >> + ret = IS_ERR(bpf_patch_instructions(dst, &insn, len, true)); > > Why IS_ERR ? > > As far as I understand from the weak definition in kernel/bpf/core.c, > this function is supposed to return an error, not a bool. My bad! Will fix that in the next revision. - Hari
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 170ebf8ac0f2..7cd4cf53d61c 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -30,7 +30,7 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size) * Patch 'len' bytes of instructions from opcode to addr, one instruction * at a time. Returns addr on success. ERR_PTR(-EINVAL), otherwise. */ -static void *bpf_patch_instructions(void *addr, void *opcode, size_t len) +static void *bpf_patch_instructions(void *addr, void *opcode, size_t len, bool fill_insn) { while (len > 0) { ppc_inst_t insn = ppc_inst_read(opcode); @@ -41,7 +41,8 @@ static void *bpf_patch_instructions(void *addr, void *opcode, size_t len) len -= ilen; addr = addr + ilen; - opcode = opcode + ilen; + if (!fill_insn) + opcode = opcode + ilen; } return addr; @@ -307,7 +308,22 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len) return ERR_PTR(-EINVAL); mutex_lock(&text_mutex); - ret = bpf_patch_instructions(dst, src, len); + ret = bpf_patch_instructions(dst, src, len, false); + mutex_unlock(&text_mutex); + + return ret; +} + +int bpf_arch_text_invalidate(void *dst, size_t len) +{ + u32 insn = BREAKPOINT_INSTRUCTION; + int ret; + + if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst))) + return -EINVAL; + + mutex_lock(&text_mutex); + ret = IS_ERR(bpf_patch_instructions(dst, &insn, len, true)); mutex_unlock(&text_mutex); return ret;
Implement bpf_arch_text_invalidate and use it to fill unused part of the bpf_prog_pack with trap instructions when a BPF program is freed. Signed-off-by: Hari Bathini <hbathini@linux.ibm.com> --- arch/powerpc/net/bpf_jit_comp.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-)