diff mbox series

[RFC,1/3] ftrace: Add ftrace_location_lookup() to lookup address of ftrace location

Message ID 1b71a95af2e21b8dc3599005dfa0170008ed952c.1644216043.git.naveen.n.rao@linux.vnet.ibm.com (mailing list archive)
State RFC
Delegated to: BPF
Headers show
Series powerpc64/bpf: Add support for BPF Trampolines | expand

Checks

Context Check Description
netdev/tree_selection success Not a local patch, async

Commit Message

Naveen N. Rao Feb. 7, 2022, 7:07 a.m. UTC
Add a new function ftrace_location_lookup() that can be used to
determine the exact ftrace location around function entry. This is
useful on architectures where the ftrace location is not the very first
instruction in a function. Such architectures can override this function
to search for the ftrace location near function entry and return its
exact address.

Convert some uses of ftrace_location() in BPF infrastructure to the new
function.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
---
 include/linux/ftrace.h  |  5 +++++
 kernel/bpf/trampoline.c | 27 +++++++++------------------
 kernel/trace/ftrace.c   | 14 ++++++++++++++
 3 files changed, 28 insertions(+), 18 deletions(-)
diff mbox series

Patch

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 708e9d610f1337..59791f2aa0b356 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -582,6 +582,7 @@  int ftrace_test_record(struct dyn_ftrace *rec, bool enable);
 void ftrace_run_stop_machine(int command);
 unsigned long ftrace_location(unsigned long ip);
 unsigned long ftrace_location_range(unsigned long start, unsigned long end);
+unsigned long ftrace_location_lookup(unsigned long ip);
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);
 
@@ -795,6 +796,10 @@  static inline unsigned long ftrace_location(unsigned long ip)
 {
 	return 0;
 }
+static inline unsigned long ftrace_location_lookup(unsigned long ip)
+{
+	return 0;
+}
 
 /*
  * Again users of functions that have ftrace_ops may not
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 4b6974a195c138..5da9d332cd0e10 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -117,25 +117,14 @@  static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
 	tr->mod = NULL;
 }
 
-static int is_ftrace_location(void *ip)
-{
-	long addr;
-
-	addr = ftrace_location((long)ip);
-	if (!addr)
-		return 0;
-	if (WARN_ON_ONCE(addr != (long)ip))
-		return -EFAULT;
-	return 1;
-}
-
 static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
 {
 	void *ip = tr->func.addr;
 	int ret;
 
 	if (tr->func.ftrace_managed)
-		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
+		ret = unregister_ftrace_direct(ftrace_location_lookup((unsigned long)ip),
+									(long)old_addr);
 	else
 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
 
@@ -150,7 +139,8 @@  static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_ad
 	int ret;
 
 	if (tr->func.ftrace_managed)
-		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
+		ret = modify_ftrace_direct(ftrace_location_lookup((unsigned long)ip),
+						(long)old_addr, (long)new_addr);
 	else
 		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
 	return ret;
@@ -162,10 +152,11 @@  static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
 	void *ip = tr->func.addr;
 	int ret;
 
-	ret = is_ftrace_location(ip);
-	if (ret < 0)
-		return ret;
-	tr->func.ftrace_managed = ret;
+	ip = (void *)ftrace_location_lookup((unsigned long)ip);
+	tr->func.ftrace_managed = !!ip;
+
+	if (!ip)
+		ip = tr->func.addr;
 
 	if (bpf_trampoline_module_get(tr))
 		return -ENOENT;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ff57a842fbebcd..6a68b86b2b6ac6 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1581,6 +1581,20 @@  unsigned long ftrace_location(unsigned long ip)
 	return ftrace_location_range(ip, ip);
 }
 
+/**
+ * ftrace_location_lookup - return exact address of traced location
+ * @ip: the instruction pointer to check
+ *
+ * Used to lookup traced location around function entry. This is
+ * especially useful on architectures where the traced location is
+ * not the very first instruction in a function. Such architectures
+ * should provide an implementation of this function.
+ */
+unsigned long __weak ftrace_location_lookup(unsigned long ip)
+{
+	return ftrace_location_range(ip, ip);
+}
+
 /**
  * ftrace_text_reserved - return true if range contains an ftrace location
  * @start: start of range to search