--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -2026,18 +2026,10 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
return ctx->idx;
}
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
- void *image_end, const struct btf_func_model *m,
- u32 flags, struct bpf_tramp_links *tlinks,
- void *func_addr)
+static int btf_func_model_nregs(const struct btf_func_model *m)
{
- int i, ret;
int nregs = m->nr_args;
- int max_insns = ((long)image_end - (long)image) / AARCH64_INSN_SIZE;
- struct jit_ctx ctx = {
- .image = NULL,
- .idx = 0,
- };
+ int i;
/* extra registers needed for struct argument */
for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
@@ -2046,19 +2038,53 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
nregs += (m->arg_size[i] + 7) / 8 - 1;
}
+ return nregs;
+}
+
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ struct jit_ctx ctx = {
+ .image = NULL,
+ .idx = 0,
+ };
+ struct bpf_tramp_image im;
+ int nregs, ret;
+
+ nregs = btf_func_model_nregs(m);
/* the first 8 registers are used for arguments */
if (nregs > 8)
return -ENOTSUPP;
- ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);
+ ret = prepare_trampoline(&ctx, &im, tlinks, func_addr, nregs, flags);
if (ret < 0)
return ret;
- if (ret > max_insns)
- return -EFBIG;
+	return ret * AARCH64_INSN_SIZE;
+}
- ctx.image = image;
- ctx.idx = 0;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ void *image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ int ret, nregs;
+ struct jit_ctx ctx = {
+ .image = image,
+ .idx = 0,
+ };
+
+ nregs = btf_func_model_nregs(m);
+ /* the first 8 registers are used for arguments */
+ if (nregs > 8)
+ return -ENOTSUPP;
+
+ ret = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
+ if (ret < 0)
+ return ret;
+
+ if (ret > ((long)image_end - (long)image))
+ return -EFBIG;
jit_fill_hole(image, (unsigned int)(image_end - image));
ret = prepare_trampoline(&ctx, im, tlinks, func_addr, nregs, flags);
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -1024,6 +1024,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
return ret;
}
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ struct bpf_tramp_image im;
+	struct rv_jit_context ctx;
+	int ret;
+
+ ctx.ninsns = 0;
+ ctx.insns = NULL;
+ ctx.ro_insns = NULL;
+ ret = __arch_prepare_bpf_trampoline(&im, m, tlinks, func_addr, flags, &ctx);
+
+	return ret < 0 ? ret : ninsns_rvoff(ctx.ninsns);
+}
+
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
void *image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
@@ -1032,14 +1047,11 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
int ret;
struct rv_jit_context ctx;
- ctx.ninsns = 0;
- ctx.insns = NULL;
- ctx.ro_insns = NULL;
- ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+	ret = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
if (ret < 0)
return ret;
- if (ninsns_rvoff(ret) > (long)image_end - (long)image)
+ if (ret > (long)image_end - (long)image)
return -EFBIG;
ctx.ninsns = 0;
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -2622,6 +2622,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
return 0;
}
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *orig_call)
+{
+ struct bpf_tramp_image im;
+ struct bpf_tramp_jit tjit;
+ int ret;
+
+ memset(&tjit, 0, sizeof(tjit));
+
+ ret = __arch_prepare_bpf_trampoline(&im, &tjit, m, flags,
+ tlinks, orig_call);
+
+ return ret < 0 ? ret : tjit.common.prg;
+}
+
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
void *image_end, const struct btf_func_model *m,
u32 flags, struct bpf_tramp_links *tlinks,
@@ -2629,30 +2644,23 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
{
struct bpf_tramp_jit tjit;
int ret;
- int i;
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- /* Compute offsets, check whether the code fits. */
- memset(&tjit, 0, sizeof(tjit));
- } else {
- /* Generate the code. */
- tjit.common.prg = 0;
- tjit.common.prg_buf = image;
- }
- ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
- tlinks, func_addr);
- if (ret < 0)
- return ret;
- if (tjit.common.prg > (char *)image_end - (char *)image)
- /*
- * Use the same error code as for exceeding
- * BPF_MAX_TRAMP_LINKS.
- */
- return -E2BIG;
- }
+ ret = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
+ if (ret < 0)
+ return ret;
+
+ if (ret > (char *)image_end - (char *)image)
+ /*
+ * Use the same error code as for exceeding
+ * BPF_MAX_TRAMP_LINKS.
+ */
+ return -E2BIG;
- return tjit.common.prg;
+ memset(&tjit, 0, sizeof(tjit));
+ tjit.common.prg_buf = image;
+ ret = __arch_prepare_bpf_trampoline(im, &tjit, m, flags,
+ tlinks, func_addr);
+ return ret < 0 ? ret : tjit.common.prg;
}
bool bpf_jit_supports_subprog_tailcalls(void)
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2422,10 +2422,10 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
* add rsp, 8 // skip eth_type_trans's frame
* ret // return to its caller
*/
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
- const struct btf_func_model *m, u32 flags,
- struct bpf_tramp_links *tlinks,
- void *func_addr)
+static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr)
{
int i, ret, nr_regs = m->nr_args, stack_size = 0;
int regs_off, nregs_off, ip_off, run_ctx_off, arg_stack_off, rbx_off;
@@ -2678,6 +2678,35 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
return ret;
}
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ return __arch_prepare_bpf_trampoline(im, image, image_end, m, flags, tlinks, func_addr);
+}
+
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ struct bpf_tramp_image im;
+ void *image;
+ int ret;
+
+ /* Allocate a temporary buffer for __arch_prepare_bpf_trampoline().
+ * This will NOT cause fragmentation in direct map, as we do not
+ * call set_memory_*() on this buffer.
+ */
+ image = bpf_jit_alloc_exec(PAGE_SIZE);
+ if (!image)
+ return -ENOMEM;
+
+ ret = __arch_prepare_bpf_trampoline(&im, image, image + PAGE_SIZE, m, flags,
+ tlinks, func_addr);
+ bpf_jit_free_exec(image);
+ return ret;
+}
+
static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
{
u8 *jg_reloc, *prog = *pprog;
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1087,6 +1087,8 @@ void *arch_alloc_bpf_trampoline(int size);
void arch_free_bpf_trampoline(void *image, int size);
void arch_protect_bpf_trampoline(void *image, int size);
void arch_unprotect_bpf_trampoline(void *image, int size);
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr);
u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
struct bpf_tramp_run_ctx *run_ctx);
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -1070,6 +1070,12 @@ void __weak arch_unprotect_bpf_trampoline(void *image, int size)
set_memory_rw((long)image, 1);
}
+int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+ struct bpf_tramp_links *tlinks, void *func_addr)
+{
+ return -ENOTSUPP;
+}
+
static int __init init_trampolines(void)
{
int i;

This helper will be used to calculate the size of the trampoline before
allocating the memory.

Signed-off-by: Song Liu <song@kernel.org>
---
 arch/arm64/net/bpf_jit_comp.c   | 56 ++++++++++++++++++++++++---------
 arch/riscv/net/bpf_jit_comp64.c | 21 ++++++++++---
 arch/s390/net/bpf_jit_comp.c    | 52 +++++++++++++++++-------------
 arch/x86/net/bpf_jit_comp.c     | 37 +++++++++++++++++++---
 include/linux/bpf.h             |  2 ++
 kernel/bpf/trampoline.c         |  6 ++++
 6 files changed, 128 insertions(+), 46 deletions(-)
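
For reviewers, a minimal sketch (not part of this patch) of the
size-then-allocate flow the new helper enables. The caller name
alloc_and_prepare_trampoline() is hypothetical; arch_alloc_bpf_trampoline()
and arch_free_bpf_trampoline() are the existing helpers declared next to the
new prototype in the include/linux/bpf.h hunk above.

/* Illustration only: size the trampoline first, then allocate exactly
 * what is needed before generating the final code.
 */
static void *alloc_and_prepare_trampoline(struct bpf_tramp_image *im,
					  const struct btf_func_model *m,
					  u32 flags,
					  struct bpf_tramp_links *tlinks,
					  void *func_addr)
{
	void *image;
	int size, ret;

	/* Dry run: compute the image size without committing memory. */
	size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
	if (size < 0)
		return ERR_PTR(size);

	image = arch_alloc_bpf_trampoline(size);
	if (!image)
		return ERR_PTR(-ENOMEM);

	/* Code generation must fit within the size computed above. */
	ret = arch_prepare_bpf_trampoline(im, image, image + size, m,
					  flags, tlinks, func_addr);
	if (ret < 0) {
		arch_free_bpf_trampoline(image, size);
		return ERR_PTR(ret);
	}
	return image;
}

With a flow like this, callers no longer need to over-allocate a full page
and check -EFBIG/-E2BIG after the fact; the size is known up front.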