@@ -58,10 +58,7 @@ void notrace __hot ftrace_function_trampoline(unsigned long parent,
 #endif
 	extern struct ftrace_ops *function_trace_op;
 
-	if (function_trace_op->flags & FTRACE_OPS_FL_ENABLED &&
-	    ftrace_trace_function != ftrace_stub)
-		ftrace_trace_function(self_addr, parent,
-				function_trace_op, fregs);
+	ftrace_trace_function(self_addr, parent, function_trace_op, fregs);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (dereference_function_descriptor(ftrace_graph_return) !=
@@ -101,6 +98,7 @@ int __init ftrace_dyn_arch_init(void)
 }
 
 int ftrace_update_ftrace_func(ftrace_func_t func)
 {
+	ftrace_trace_function = func;
 	return 0;
 }
dereference_function_descriptor() calls get_kernel_nofault(), which
itself might call back into tracing. This leads to a deadlock. Instead
of reverting to casts to check whether a trace function is set, just
always call the trace function: when tracing is disabled, the ftrace
core hands ftrace_update_ftrace_func() the ftrace_stub function, which
simply returns. With dynamic ftrace the trampoline shouldn't be called
at all while ftrace is disabled. This adds one function call for the
non-dynamic case, but anyone concerned about the cost of ftrace should
use dynamic ftrace anyway.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
---
 arch/parisc/kernel/ftrace.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)
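
Not part of the patch, but for reviewers unfamiliar with the pattern:
here is a minimal, compilable userspace sketch of why the unconditional
call is safe. The names mirror the kernel's (ftrace_stub,
ftrace_trace_function, ftrace_update_ftrace_func), but the bodies and
the simplified two-argument signature are illustrative stand-ins, not
kernel code.

#include <stdio.h>

/* Simplified tracer signature; the kernel's takes more arguments. */
typedef void (*ftrace_func_t)(unsigned long self_addr,
			      unsigned long parent);

/* Default target: does nothing, so calling it unconditionally is safe. */
static void ftrace_stub(unsigned long self_addr, unsigned long parent)
{
}

static void demo_tracer(unsigned long self_addr, unsigned long parent)
{
	printf("traced: func=%#lx caller=%#lx\n", self_addr, parent);
}

/* Points at ftrace_stub until a tracer is installed. */
static ftrace_func_t ftrace_trace_function = ftrace_stub;

static void ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_trace_function = func;
}

/*
 * The trampoline calls through the pointer unconditionally: no flag
 * tests and no descriptor dereferences that could recurse into tracing.
 */
static void trampoline(unsigned long self_addr, unsigned long parent)
{
	ftrace_trace_function(self_addr, parent);
}

int main(void)
{
	trampoline(0x1000, 0x2000);		/* hits ftrace_stub, no output */
	ftrace_update_ftrace_func(demo_tracer);
	trampoline(0x1000, 0x2000);		/* prints one trace line */
	ftrace_update_ftrace_func(ftrace_stub);
	trampoline(0x1000, 0x2000);		/* silent again */
	return 0;
}

Swapping the pointer replaces the removed flag-and-stub comparison:
enabling or disabling tracing is a single store, and the fast path never
has to inspect function_trace_op->flags or dereference a function
descriptor.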