@@ -13,23 +13,13 @@
 #include <asm/patch.h>
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+void arch_ftrace_update_code(int command)
 {
 	mutex_lock(&text_mutex);
-
-	/*
-	 * The code sequences we use for ftrace can't be patched while the
-	 * kernel is running, so we need to use stop_machine() to modify them
-	 * for now. This doesn't play nice with text_mutex, we use this flag
-	 * to elide the check.
-	 */
-	riscv_patch_in_stop_machine = true;
-}
-
-void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
-{
-	riscv_patch_in_stop_machine = false;
+	command |= FTRACE_MAY_SLEEP;
+	ftrace_modify_all_code(command);
 	mutex_unlock(&text_mutex);
+	flush_icache_all();
 }
 
 static int ftrace_check_current_call(unsigned long hook_pos,
@@ -155,41 +145,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
 	return __ftrace_modify_call_site(&ftrace_call_dest, func, true);
 }
 
-struct ftrace_modify_param {
-	int command;
-	atomic_t cpu_count;
-};
-
-static int __ftrace_modify_code(void *data)
-{
-	struct ftrace_modify_param *param = data;
-
-	if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) {
-		ftrace_modify_all_code(param->command);
-		/*
-		 * Make sure the patching store is effective *before* we
-		 * increment the counter which releases all waiting CPUs
-		 * by using the release variant of atomic increment. The
-		 * release pairs with the call to local_flush_icache_all()
-		 * on the waiting CPU.
-		 */
-		atomic_inc_return_release(&param->cpu_count);
-	} else {
-		while (atomic_read(&param->cpu_count) <= num_online_cpus())
-			cpu_relax();
-
-		local_flush_icache_all();
-	}
-
-	return 0;
-}
-
-void arch_ftrace_update_code(int command)
-{
-	struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
-
-	stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
-}
 #endif
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS