[v2,5/5] ftrace: Revert the FTRACE_MCOUNT_MAX_OFFSET workaround

Message ID: 20240723063258.2240610-6-zhengyejian@huaweicloud.com (mailing list archive)
State: Handled Elsewhere
Series: kallsyms: Emit symbol for holes in text and fix weak function issue

Commit Message

Zheng Yejian July 23, 2024, 6:32 a.m. UTC
After the patch titled "ftrace: Skip invalid __fentry__ in
ftrace_process_locs()", __fentry__ locations in overridden weak functions
are checked and skipped, so every record left in ftrace_pages is valid and
the FTRACE_MCOUNT_MAX_OFFSET workaround can be reverted (a condensed sketch
of the heuristic being dropped follows the diffstat below). This reverts:
 1. commit b39181f7c690 ("ftrace: Add FTRACE_MCOUNT_MAX_OFFSET to avoid
    adding weak function")
 2. commit 7af82ff90a2b ("powerpc/ftrace: Ignore weak functions")
 3. commit f6834c8c59a8 ("powerpc/ftrace: Fix dropping weak symbols with
    older toolchains")

Signed-off-by: Zheng Yejian <zhengyejian@huaweicloud.com>
---
 arch/powerpc/include/asm/ftrace.h |   7 --
 arch/x86/include/asm/ftrace.h     |   7 --
 kernel/trace/ftrace.c             | 141 +-----------------------------
 3 files changed, 2 insertions(+), 153 deletions(-)
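
For reference, the heuristic being reverted boils down to the kallsyms offset
check sketched below. This is an illustrative condensation only, not part of
the patch; it mirrors test_for_valid_rec() in the kernel/trace/ftrace.c hunk
further down, and rec_looks_valid() is merely a name chosen for the sketch.
FTRACE_MCOUNT_MAX_OFFSET is the per-architecture constant removed from the
headers above.

    #include <linux/ftrace.h>
    #include <linux/kallsyms.h>

    /*
     * A leftover mcount/fentry record from an overridden weak function does
     * not resolve to its own symbol: kallsyms returns the preceding symbol
     * with an offset far larger than any real function prologue. The
     * workaround used that offset to detect and disable such records.
     */
    static int rec_looks_valid(struct dyn_ftrace *rec)
    {
        char str[KSYM_SYMBOL_LEN];
        unsigned long offset;

        if (!kallsyms_lookup(rec->ip, NULL, &offset, NULL, str) ||
            offset > FTRACE_MCOUNT_MAX_OFFSET) {
                rec->flags |= FTRACE_FL_DISABLED;  /* never patch this ip */
                return 0;
        }
        return 1;
    }

With the prerequisite patch filtering such locations out in
ftrace_process_locs(), every remaining record points at a real function
entry, so this check, the boot-time workqueue that ran it, and the
"__ftrace_invalid_address__" placeholder printed for disabled records can
all go.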

Patch

diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 559560286e6d..328cf55acfb7 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -8,13 +8,6 @@ 
 #define MCOUNT_ADDR		((unsigned long)(_mcount))
 #define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */
 
-/* Ignore unused weak functions which will have larger offsets */
-#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
-#define FTRACE_MCOUNT_MAX_OFFSET	16
-#elif defined(CONFIG_PPC32)
-#define FTRACE_MCOUNT_MAX_OFFSET	8
-#endif
-
 #ifndef __ASSEMBLY__
 extern void _mcount(void);
 
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 0152a81d9b4a..6a3a4a8830dc 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -9,13 +9,6 @@ 
 # define MCOUNT_ADDR		((unsigned long)(__fentry__))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */
 
-/* Ignore unused weak functions which will have non zero offsets */
-#ifdef CONFIG_HAVE_FENTRY
-# include <asm/ibt.h>
-/* Add offset for endbr64 if IBT enabled */
-# define FTRACE_MCOUNT_MAX_OFFSET	ENDBR_INSN_SIZE
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6947be8801d9..37510c591498 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -49,8 +49,6 @@ 
 #define FTRACE_NOCLEAR_FLAGS	(FTRACE_FL_DISABLED | FTRACE_FL_TOUCHED | \
 				 FTRACE_FL_MODIFIED)
 
-#define FTRACE_INVALID_FUNCTION		"__ftrace_invalid_address__"
-
 #define FTRACE_WARN_ON(cond)			\
 	({					\
 		int ___r = cond;		\
@@ -4208,105 +4206,6 @@  static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
 		seq_printf(m, " ->%pS", ptr);
 }
 
-#ifdef FTRACE_MCOUNT_MAX_OFFSET
-/*
- * Weak functions can still have an mcount/fentry that is saved in
- * the __mcount_loc section. These can be detected by having a
- * symbol offset of greater than FTRACE_MCOUNT_MAX_OFFSET, as the
- * symbol found by kallsyms is not the function that the mcount/fentry
- * is part of. The offset is much greater in these cases.
- *
- * Test the record to make sure that the ip points to a valid kallsyms
- * and if not, mark it disabled.
- */
-static int test_for_valid_rec(struct dyn_ftrace *rec)
-{
-	char str[KSYM_SYMBOL_LEN];
-	unsigned long offset;
-	const char *ret;
-
-	ret = kallsyms_lookup(rec->ip, NULL, &offset, NULL, str);
-
-	/* Weak functions can cause invalid addresses */
-	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
-		rec->flags |= FTRACE_FL_DISABLED;
-		return 0;
-	}
-	return 1;
-}
-
-static struct workqueue_struct *ftrace_check_wq __initdata;
-static struct work_struct ftrace_check_work __initdata;
-
-/*
- * Scan all the mcount/fentry entries to make sure they are valid.
- */
-static __init void ftrace_check_work_func(struct work_struct *work)
-{
-	struct ftrace_page *pg;
-	struct dyn_ftrace *rec;
-
-	mutex_lock(&ftrace_lock);
-	do_for_each_ftrace_rec(pg, rec) {
-		test_for_valid_rec(rec);
-	} while_for_each_ftrace_rec();
-	mutex_unlock(&ftrace_lock);
-}
-
-static int __init ftrace_check_for_weak_functions(void)
-{
-	INIT_WORK(&ftrace_check_work, ftrace_check_work_func);
-
-	ftrace_check_wq = alloc_workqueue("ftrace_check_wq", WQ_UNBOUND, 0);
-
-	queue_work(ftrace_check_wq, &ftrace_check_work);
-	return 0;
-}
-
-static int __init ftrace_check_sync(void)
-{
-	/* Make sure the ftrace_check updates are finished */
-	if (ftrace_check_wq)
-		destroy_workqueue(ftrace_check_wq);
-	return 0;
-}
-
-late_initcall_sync(ftrace_check_sync);
-subsys_initcall(ftrace_check_for_weak_functions);
-
-static int print_rec(struct seq_file *m, unsigned long ip)
-{
-	unsigned long offset;
-	char str[KSYM_SYMBOL_LEN];
-	char *modname;
-	const char *ret;
-
-	ret = kallsyms_lookup(ip, NULL, &offset, &modname, str);
-	/* Weak functions can cause invalid addresses */
-	if (!ret || offset > FTRACE_MCOUNT_MAX_OFFSET) {
-		snprintf(str, KSYM_SYMBOL_LEN, "%s_%ld",
-			 FTRACE_INVALID_FUNCTION, offset);
-		ret = NULL;
-	}
-
-	seq_puts(m, str);
-	if (modname)
-		seq_printf(m, " [%s]", modname);
-	return ret == NULL ? -1 : 0;
-}
-#else
-static inline int test_for_valid_rec(struct dyn_ftrace *rec)
-{
-	return 1;
-}
-
-static inline int print_rec(struct seq_file *m, unsigned long ip)
-{
-	seq_printf(m, "%ps", (void *)ip);
-	return 0;
-}
-#endif
-
 static int t_show(struct seq_file *m, void *v)
 {
 	struct ftrace_iterator *iter = m->private;
@@ -4334,13 +4233,7 @@  static int t_show(struct seq_file *m, void *v)
 	if (iter->flags & FTRACE_ITER_ADDRS)
 		seq_printf(m, "%lx ", rec->ip);
 
-	if (print_rec(m, rec->ip)) {
-		/* This should only happen when a rec is disabled */
-		WARN_ON_ONCE(!(rec->flags & FTRACE_FL_DISABLED));
-		seq_putc(m, '\n');
-		return 0;
-	}
-
+	seq_printf(m, "%ps", (void *)rec->ip);
 	if (iter->flags & (FTRACE_ITER_ENABLED | FTRACE_ITER_TOUCHED)) {
 		struct ftrace_ops *ops;
 
@@ -4720,24 +4613,6 @@  add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
 	return 0;
 }
 
-#ifdef FTRACE_MCOUNT_MAX_OFFSET
-static int lookup_ip(unsigned long ip, char **modname, char *str)
-{
-	unsigned long offset;
-
-	kallsyms_lookup(ip, NULL, &offset, modname, str);
-	if (offset > FTRACE_MCOUNT_MAX_OFFSET)
-		return -1;
-	return 0;
-}
-#else
-static int lookup_ip(unsigned long ip, char **modname, char *str)
-{
-	kallsyms_lookup(ip, NULL, NULL, modname, str);
-	return 0;
-}
-#endif
-
 static int
 ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
 		struct ftrace_glob *mod_g, int exclude_mod)
@@ -4745,12 +4620,7 @@  ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
 	char str[KSYM_SYMBOL_LEN];
 	char *modname;
 
-	if (lookup_ip(rec->ip, &modname, str)) {
-		/* This should only happen when a rec is disabled */
-		WARN_ON_ONCE(system_state == SYSTEM_RUNNING &&
-			     !(rec->flags & FTRACE_FL_DISABLED));
-		return 0;
-	}
+	kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
 
 	if (mod_g) {
 		int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
@@ -7399,13 +7269,6 @@  void ftrace_module_enable(struct module *mod)
 		if (!within_module(rec->ip, mod))
 			break;
 
-		/* Weak functions should still be ignored */
-		if (!test_for_valid_rec(rec)) {
-			/* Clear all other flags. Should not be enabled anyway */
-			rec->flags = FTRACE_FL_DISABLED;
-			continue;
-		}
-
 		cnt = 0;
 
 		/*