@@ -1144,6 +1144,7 @@ void ftrace_graph_init_task(struct task_struct *t)
t->curr_ret_stack = 0;
t->curr_ret_depth = -1;
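+ /*
+ * Hold ftrace_lock so that ftrace_graph_active cannot change and
+ * free_ret_stacks() cannot run while this task's ret_stack is
+ * being set up.
+ */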
+ mutex_lock(&ftrace_lock);
if (ftrace_graph_active) {
unsigned long *ret_stack;
@@ -1155,6 +1156,7 @@ void ftrace_graph_init_task(struct task_struct *t)
- return;
+ goto out;
graph_init_task(t, ret_stack);
}
+out:
+ mutex_unlock(&ftrace_lock);
}
void ftrace_graph_exit_task(struct task_struct *t)
@@ -1292,19 +1294,106 @@ static void ftrace_graph_disable_direct(bool disable_branch)
fgraph_direct_gops = &fgraph_stub;
}
-/* The cpu_boot init_task->ret_stack will never be freed */
-static int fgraph_cpu_init(unsigned int cpu)
+static void __fgraph_cpu_init(unsigned int cpu)
{
if (!idle_task(cpu)->ret_stack)
ftrace_graph_init_idle_task(idle_task(cpu), cpu);
+}
+
+static int fgraph_cpu_init(unsigned int cpu)
+{
+ if (ftrace_graph_active)
+ __fgraph_cpu_init(cpu);
return 0;
}
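+/*
+ * Bookkeeping for a ret_stack that is queued for freeing. It is
+ * overlaid on the unused end of the task's shadow stack, so no
+ * separate allocation is needed to link the stack onto a list.
+ */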
+struct ret_stack_free_data {
+ struct list_head list;
+ struct task_struct *task;
+};
+
+static void remove_ret_stack(struct task_struct *t, struct list_head *head, int list_index)
+{
+ struct ret_stack_free_data *free_data;
+
+ /* If the ret_stack is still in use, skip this */
+ if (t->curr_ret_depth >= 0)
+ return;
+
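+ /* Lay the list node over the unused top of the shadow stack */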
+ free_data = (struct ret_stack_free_data *)(t->ret_stack + list_index);
+ list_add(&free_data->list, head);
+ free_data->task = t;
+}
+
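+/*
+ * Free the ret_stack of every task that no longer uses it. Called under
+ * ftrace_lock after the last fgraph user has unregistered.
+ */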
+static void free_ret_stacks(void)
+{
+ struct ret_stack_free_data *free_data, *n;
+ struct task_struct *g, *t;
+ LIST_HEAD(stacks);
+ int list_index;
+ int list_sz;
+ int cpu;
+
+ /* Calculate the size in longs to hold ret_stack_free_data */
+ list_sz = DIV_ROUND_UP(sizeof(struct ret_stack_free_data), sizeof(long));
+
+ /*
+ * We do not want to race with __ftrace_return_to_handler(), where
+ * this CPU could see curr_ret_depth go negative before the handler
+ * is actually done with the ret_stack. As tracing is disabled, the
+ * ret_stack is not going to be used anymore and there will be no
+ * more callbacks. Use the top of the stack as the linked list
+ * pointer to attach the ret_stack to the local stacks list. Then,
+ * at the end, run an RCU tasks trace synchronization, which
+ * guarantees that there are no more users of the ret_stacks and
+ * they can all be freed.
+ */
+ list_index = SHADOW_STACK_MAX_OFFSET - list_sz;
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, t) {
+ if (t->ret_stack)
+ remove_ret_stack(t, &stacks, list_index);
+ }
+ read_unlock(&tasklist_lock);
+
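+ /* Idle tasks are not on the tasklist and have to be handled separately */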
+ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ t = idle_task(cpu);
+ if (t->ret_stack)
+ remove_ret_stack(t, &stacks, list_index);
+ }
+ cpus_read_unlock();
+
+ /* Make sure nothing is using the ret_stacks anymore */
+ synchronize_rcu_tasks_trace();
+
+ list_for_each_entry_safe(free_data, n, &stacks, list) {
+ unsigned long *stack = free_data->task->ret_stack;
+
+ free_data->task->ret_stack = NULL;
+ kmem_cache_free(fgraph_stack_cachep, stack);
+ }
+}
+
+static __init int fgraph_init(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
+ fgraph_cpu_init, NULL);
+ if (ret < 0) {
+ pr_warn("fgraph: Failed to initialize cpu hotplug support\n");
+ return ret;
+ }
+ return 0;
+}
+core_initcall(fgraph_init);
+
int register_ftrace_graph(struct fgraph_ops *gops)
{
- static bool fgraph_initialized;
int command = 0;
- int ret = 0;
+ int cpu;
+ int ret;
int i = -1;
guard(mutex)(&ftrace_lock);
@@ -1317,17 +1406,6 @@ int register_ftrace_graph(struct fgraph_ops *gops)
return -ENOMEM;
}
- if (!fgraph_initialized) {
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
- fgraph_cpu_init, NULL);
- if (ret < 0) {
- pr_warn("fgraph: Error to init cpu hotplug support\n");
- return ret;
- }
- fgraph_initialized = true;
- ret = 0;
- }
-
if (!fgraph_array[0]) {
/* The array must always have real data on it */
for (i = 0; i < FGRAPH_ARRAY_SIZE; i++)
@@ -1342,6 +1420,12 @@ int register_ftrace_graph(struct fgraph_ops *gops)
ftrace_graph_active++;
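+ /* Make sure the idle task of each online CPU has a ret_stack */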
+ cpus_read_lock();
+ for_each_online_cpu(cpu)
+ __fgraph_cpu_init(cpu);
+ cpus_read_unlock();
+
if (ftrace_graph_active == 2)
ftrace_graph_disable_direct(true);
@@ -1412,6 +1496,7 @@ void unregister_ftrace_graph(struct fgraph_ops *gops)
ftrace_graph_entry = ftrace_graph_entry_stub;
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
+ free_ret_stacks();
}
out:
gops->saved_func = NULL;