@@ -52,6 +52,11 @@
DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
int ftrace_graph_active;
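+/*
+ * fgraph_array[] holds the fgraph_ops of each registered user, up to
+ * FGRAPH_ARRAY_SIZE at a time. Unused slots are set to &fgraph_stub on
+ * the first registration so entries can be called without NULL checks.
+ * fgraph_array_cnt is one past the highest index currently in use.
+ */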
+static int fgraph_array_cnt;
+#define FGRAPH_ARRAY_SIZE 16
+
+static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
+
/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
@@ -75,6 +80,20 @@ int __weak ftrace_disable_ftrace_graph_caller(void)
}
#endif
+int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
+{
+ return 0;
+}
+
+static void ftrace_graph_ret_stub(struct ftrace_graph_ret *trace)
+{
+}
+
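+/* Placeholder ops used to fill unused fgraph_array[] slots */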
+static struct fgraph_ops fgraph_stub = {
+ .entryfunc = ftrace_graph_entry_stub,
+ .retfunc = ftrace_graph_ret_stub,
+};
+
/**
* ftrace_graph_stop - set to permanently disable function graph tracing
*
@@ -161,7 +180,7 @@ int function_graph_enter(unsigned long ret, unsigned long func,
goto out;
/* Only trace if the calling function expects to */
- if (!ftrace_graph_entry(&trace))
+ if (!fgraph_array[0]->entryfunc(&trace))
goto out_ret;
return 0;
@@ -276,7 +295,7 @@ static unsigned long __ftrace_return_to_handler(struct fgraph_ret_regs *ret_regs
trace.retval = fgraph_ret_regs_return_value(ret_regs);
#endif
trace.rettime = trace_clock_local();
- ftrace_graph_return(&trace);
+ fgraph_array[0]->retfunc(&trace);
/*
* The ftrace_graph_return() may still access the current
* ret_stack structure, we need to make sure the update of
@@ -412,11 +431,6 @@ void ftrace_graph_sleep_time_control(bool enable)
fgraph_sleep_time = enable;
}
-int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
-{
- return 0;
-}
-
/*
* Simply points to ftrace_stub, but with the proper protocol.
* Defined by the linker script in linux/vmlinux.lds.h
@@ -654,37 +668,54 @@ static int start_graph_tracing(void)
int register_ftrace_graph(struct fgraph_ops *gops)
{
int ret = 0;
+ int i;
mutex_lock(&ftrace_lock);
- /* we currently allow only one tracer registered at a time */
- if (ftrace_graph_active) {
+ if (!fgraph_array[0]) {
+ /* The array must always have real data in it */
+ for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
+ fgraph_array[i] = &fgraph_stub;
+ }
+
+ /* Look for an available spot */
+ for (i = 0; i < FGRAPH_ARRAY_SIZE; i++) {
+ if (fgraph_array[i] == &fgraph_stub)
+ break;
+ }
+ if (i >= FGRAPH_ARRAY_SIZE) {
ret = -EBUSY;
goto out;
}
- register_pm_notifier(&ftrace_suspend_notifier);
+ fgraph_array[i] = gops;
+ if (i + 1 > fgraph_array_cnt)
+ fgraph_array_cnt = i + 1;
ftrace_graph_active++;
- ret = start_graph_tracing();
- if (ret) {
- ftrace_graph_active--;
- goto out;
- }
- ftrace_graph_return = gops->retfunc;
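+ /* The first user to register brings up the shared fgraph infrastructure */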
+ if (ftrace_graph_active == 1) {
+ register_pm_notifier(&ftrace_suspend_notifier);
+ ret = start_graph_tracing();
+ if (ret) {
+ ftrace_graph_active--;
+ goto out;
+ }
+
+ ftrace_graph_return = gops->retfunc;
- /*
- * Update the indirect function to the entryfunc, and the
- * function that gets called to the entry_test first. Then
- * call the update fgraph entry function to determine if
- * the entryfunc should be called directly or not.
- */
- __ftrace_graph_entry = gops->entryfunc;
- ftrace_graph_entry = ftrace_graph_entry_test;
- update_function_graph_func();
+ /*
+ * Update the indirect function to the entryfunc, and the
+ * function that gets called to the entry_test first. Then
+ * call the update fgraph entry function to determine if
+ * the entryfunc should be called directly or not.
+ */
+ __ftrace_graph_entry = gops->entryfunc;
+ ftrace_graph_entry = ftrace_graph_entry_test;
+ update_function_graph_func();
- ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
+ ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
+ }
out:
mutex_unlock(&ftrace_lock);
return ret;
@@ -692,19 +723,36 @@ int register_ftrace_graph(struct fgraph_ops *gops)
void unregister_ftrace_graph(struct fgraph_ops *gops)
{
+ int i;
+
mutex_lock(&ftrace_lock);
if (unlikely(!ftrace_graph_active))
goto out;
- ftrace_graph_active--;
- ftrace_graph_return = ftrace_stub_graph;
- ftrace_graph_entry = ftrace_graph_entry_stub;
- __ftrace_graph_entry = ftrace_graph_entry_stub;
- ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
- unregister_pm_notifier(&ftrace_suspend_notifier);
- unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
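+ /* Find the slot this gops occupies */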
+ for (i = 0; i < fgraph_array_cnt; i++)
+ if (gops == fgraph_array[i])
+ break;
+ if (i >= fgraph_array_cnt)
+ goto out;
+ fgraph_array[i] = &fgraph_stub;
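+ /* Shrink fgraph_array_cnt if the highest slot in use was freed */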
+ if (i + 1 == fgraph_array_cnt) {
+ for (; i >= 0; i--)
+ if (fgraph_array[i] != &fgraph_stub)
+ break;
+ fgraph_array_cnt = i + 1;
+ }
+
+ ftrace_graph_active--;
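+ /* The last user to unregister shuts the fgraph infrastructure down */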
+ if (!ftrace_graph_active) {
+ ftrace_graph_return = ftrace_stub_graph;
+ ftrace_graph_entry = ftrace_graph_entry_stub;
+ __ftrace_graph_entry = ftrace_graph_entry_stub;
+ ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
+ unregister_pm_notifier(&ftrace_suspend_notifier);
+ unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+ }
out:
mutex_unlock(&ftrace_lock);
}
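
A minimal sketch of how a tracer might attach through this interface; the names my_entry, my_return and my_gops are hypothetical and not part of this patch:

#include <linux/ftrace.h>

static int my_entry(struct ftrace_graph_ent *trace)
{
	/* Return non-zero to also trace this function's return */
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace)
{
}

static struct fgraph_ops my_gops = {
	.entryfunc	= my_entry,
	.retfunc	= my_return,
};

/* register_ftrace_graph(&my_gops) to attach, unregister_ftrace_graph(&my_gops) to detach */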