@@ -1151,6 +1151,7 @@ void ftrace_graph_exit_task(struct task_struct *t)
 	kfree(ret_stack);
 }
 
+#ifdef CONFIG_DYNAMIC_FTRACE
 static int fgraph_pid_func(struct ftrace_graph_ent *trace,
 			   struct fgraph_ops *gops)
 {
@@ -1177,7 +1178,6 @@ void fgraph_update_pid_func(void)
 	if (!(graph_ops.flags & FTRACE_OPS_FL_INITIALIZED))
 		return;
 
-#ifdef CONFIG_DYNAMIC_FTRACE
 	list_for_each_entry(op, &graph_ops.subop_list, list) {
 		if (op->flags & FTRACE_OPS_FL_PID) {
 			gops = container_of(op, struct fgraph_ops, ops);
@@ -1187,8 +1187,8 @@ void fgraph_update_pid_func(void)
 				static_call_update(fgraph_func, gops->entryfunc);
 		}
 	}
-#endif
 }
+#endif
 
 /* Allocate a return stack for each task */
 static int start_graph_tracing(void)
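
These three hunks widen an existing guard rather than adding a new one:
before the change, only the subop walk inside fgraph_update_pid_func()
was conditional on CONFIG_DYNAMIC_FTRACE, which left an empty function
in !CONFIG_DYNAMIC_FTRACE builds and left the static fgraph_pid_func()
with no caller (a likely -Wunused-function warning). Moving the #ifdef
above fgraph_pid_func() and the #endif past the closing brace drops both
definitions from such builds entirely. A compilable sketch of the
pattern, using a hypothetical CONFIG_FEATURE switch in place of
CONFIG_DYNAMIC_FTRACE:

/* Build with or without -DCONFIG_FEATURE, e.g. gcc -Wall sketch.c */
#include <stdio.h>

#ifdef CONFIG_FEATURE
static int feature_helper(int v)	/* plays the role of fgraph_pid_func() */
{
	return v + 1;
}

static void feature_update(void)	/* plays the role of fgraph_update_pid_func() */
{
	printf("updated: %d\n", feature_helper(41));
}
#else
static inline void feature_update(void) {}	/* stub, mirroring the header hunk below */
#endif

int main(void)
{
	feature_update();	/* no-op unless built with -DCONFIG_FEATURE */
	return 0;
}
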
@@ -52,7 +52,11 @@ static inline int ftrace_shutdown_subops(struct ftrace_ops *ops, struct ftrace_o
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern int ftrace_graph_active;
+# ifdef CONFIG_DYNAMIC_FTRACE
 extern void fgraph_update_pid_func(void);
+# else
+static inline void fgraph_update_pid_func(void) {}
+# endif
 #else /* !CONFIG_FUNCTION_GRAPH_TRACER */
 # define ftrace_graph_active 0
 static inline void fgraph_update_pid_func(void) {}
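
The header hunk pairs the new guard with a stub so that callers outside
fgraph.c can keep invoking fgraph_update_pid_func() unconditionally and
never need an #ifdef of their own. The pre-existing
!CONFIG_FUNCTION_GRAPH_TRACER branch already uses the same trick, which
is why it is left untouched. A minimal header-side sketch, again with a
hypothetical CONFIG_FEATURE standing in for CONFIG_DYNAMIC_FTRACE:

/* feature.h: callers include this and call feature_update() freely. */
#ifdef CONFIG_FEATURE
extern void feature_update(void);	/* real version, defined once in a .c file */
#else
static inline void feature_update(void) {}	/* empty inline, optimized away */
#endif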