diff mbox series

[RFC,bpf-next,2/3] bpf: Add bpf_runtime_kfunc_tracing_hook

Message ID AM6PR03MB5080A03B62901EA2AF9C78AC99FB2@AM6PR03MB5080.eurprd03.prod.outlook.com (mailing list archive)
State RFC
Delegated to: BPF
Headers show
Series bpf: BPF runtime hooks: BPF debug mode (tracing all kfuncs and helpers calls) | expand

Checks

Context Check Description
netdev/tree_selection success Clearly marked for bpf-next, async
netdev/apply fail Patch does not apply to bpf-next-0

Commit Message

Juntong Deng Feb. 17, 2025, 5:23 p.m. UTC
This patch adds the bpf_runtime_kfunc_tracing_hook.

When a bpf program is in BPF debug mode, bpf_runtime_kfunc_tracing_hook
will be installed for all calls to kfuncs and helpers.

The arguments, return value, and timestamp of each call are recorded by
bpf_runtime_kfunc_tracing_hook.

Since this is a proof of concept, I output the information to the
tracing ring buffer directly in bpf_runtime_kfunc_tracing_hook.

In actual implementation, bpf_runtime_kfunc_tracing_hook should only be
responsible for recording information. Parsing and outputting
information should be done in another thread to avoid affecting the
performance of the bpf program.

Signed-off-by: Juntong Deng <juntong.deng@outlook.com>
---
 arch/x86/net/bpf_jit_comp.c |  2 +-
 include/linux/btf.h         |  4 ++-
 kernel/bpf/btf.c            | 72 ++++++++++++++++++++++++++++++++++++-
 3 files changed, 75 insertions(+), 3 deletions(-)
diff mbox series

Patch

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index da579e835731..93714ec975e2 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -2198,7 +2198,7 @@  st:			if (is_imm8(insn->off))
 				ip += 2;
 			}
 			ip += x86_call_depth_emit_accounting(&prog, func, ip);
-			runtime_hook = select_bpf_runtime_hook(func);
+			runtime_hook = select_bpf_runtime_hook(func, bpf_prog);
 			if (runtime_hook) {
 				emit_mov_imm64(&prog, X86_REG_R9, (long)func >> 32,
 					       (u32) (long)func);
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 46681181e2bc..c4195479d0c1 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -571,7 +571,9 @@  void *bpf_runtime_acquire_hook(void *arg1, void *arg2, void *arg3,
 			       void *arg4, void *arg5, void *arg6);
 void bpf_runtime_release_hook(void *arg1, void *arg2, void *arg3,
 			      void *arg4, void *arg5, void *arg6);
-void *select_bpf_runtime_hook(void *kfunc);
+void *bpf_runtime_kfunc_tracing_hook(void *arg1, void *arg2, void *arg3,
+				     void *arg4, void *arg5, void *arg6);
+void *select_bpf_runtime_hook(void *kfunc, struct bpf_prog *bpf_prog);
 const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
 void btf_set_base_btf(struct btf *btf, const struct btf *base_btf);
 int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids);
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 917d91494d00..4d114d2739ac 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -9740,12 +9740,82 @@  void bpf_runtime_release_hook(void *arg1, void *arg2, void *arg3,
 	print_bpf_active_refs();
 }
 
-void *select_bpf_runtime_hook(void *kfunc)
+void *bpf_runtime_kfunc_tracing_hook(void *arg1, void *arg2, void *arg3,
+				    void *arg4, void *arg5, void *arg6 /* kfunc addr */)
+{
+	char kfunc_name[KSYM_SYMBOL_LEN], helper_proto_name[KSYM_SYMBOL_LEN];
+	const void *args[5] = {arg1, arg2, arg3, arg4, arg5};
+	const struct btf_type *kfunc_type, *kfunc_proto;
+	struct bpf_func_proto *helper_proto;
+	int i, vlen = 0, index = 0;
+	unsigned long rem_usec;
+	bpf_kfunc_t kfunc;
+	bool return_void;
+	void *kfunc_ret;
+	struct btf *btf;
+	char str[300];
+	s32 kfunc_id;	/* signed: btf_find_by_name_kind() returns -ENOENT on failure */
+	u64 ts_nsec;
+
+	kfunc = (bpf_kfunc_t)arg6;
+	kfunc_ret = kfunc(arg1, arg2, arg3, arg4, arg5);
+	ts_nsec = ktime_get_ns();
+
+	/* In actual implementation, the following part should be executed in
+	 * another thread to avoid affecting the performance of the bpf program.
+	 */
+	rem_usec = do_div(ts_nsec, NSEC_PER_SEC) / 1000;
+	sprint_symbol_no_offset(kfunc_name, (unsigned long)kfunc);
+	snprintf(helper_proto_name, sizeof(helper_proto_name), "%s_proto", kfunc_name);
+	helper_proto = (struct bpf_func_proto *)kallsyms_lookup_name(helper_proto_name);
+	if (helper_proto) {
+		for (i = 0; i < 5; i++) {
+			if (helper_proto->arg_type[i] == ARG_DONTCARE)
+				break;
+			vlen = i + 1;
+		}
+		return_void = helper_proto->ret_type == RET_VOID;
+	} else {
+		btf = bpf_get_btf_vmlinux();
+		kfunc_id = btf_find_by_name_kind(btf, kfunc_name, BTF_KIND_FUNC);
+		if (kfunc_id < 0)
+			return kfunc_ret;
+
+		kfunc_type = btf_type_by_id(btf, kfunc_id);
+		kfunc_proto = btf_type_by_id(btf, kfunc_type->type);
+		vlen = btf_type_vlen(kfunc_proto);
+		return_void = kfunc_proto->type == 0;
+	}
+
+	memset(str, 0, sizeof(str));
+	index += sprintf(str + index, "%s", "(");
+	for (i = 0; i < vlen; i++) {
+		if (i == vlen - 1)
+			index += sprintf(str + index, "%lx", (unsigned long)args[i]);
+		else
+			index += sprintf(str + index, "%lx,", (unsigned long)args[i]);
+	}
+
+	if (return_void)
+		sprintf(str + index, "%s", ")");
+	else
+		sprintf(str + index, ") = %lx", (unsigned long)kfunc_ret);
+
+	trace_printk("[%lu.%06lu] %s%s\n",
+		     (unsigned long)ts_nsec, rem_usec, kfunc_name, str);
+
+	return kfunc_ret;
+}
+
+void *select_bpf_runtime_hook(void *kfunc, struct bpf_prog *bpf_prog)
 {
 	struct btf_struct_kfunc *struct_kfunc, dummy_key;
 	struct btf_struct_kfunc_tab *tab;
 	struct btf *btf;
 
+	if (bpf_prog->debug_mode)
+		return bpf_runtime_kfunc_tracing_hook;
+
 	btf = bpf_get_btf_vmlinux();
 	dummy_key.kfunc_addr = (unsigned long)kfunc;