diff options
author | Andy Chiu <andy.chiu@sifive.com> | 2025-04-08 02:08:30 +0800 |
---|---|---|
committer | Palmer Dabbelt <palmer@dabbelt.com> | 2025-06-05 11:09:26 -0700 |
commit | 5aa4ef95588456df5a5563dd23892827f97fb14f (patch) | |
tree | bf6b22194412da6bc0df7af242e42b6737decf9f | |
parent | b2137c3b6d7a45622967aac78b46a4bd2cff6c42 (diff) | |
download | linux-5aa4ef95588456df5a5563dd23892827f97fb14f.tar.gz |
riscv: ftrace: do not use stop_machine to update code
Now it is safe to remove the dependency on stop_machine() for patching
code in ftrace.
Signed-off-by: Andy Chiu <andy.chiu@sifive.com>
Link: https://lore.kernel.org/r/20250407180838.42877-6-andybnac@gmail.com
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
-rw-r--r-- | arch/riscv/kernel/ftrace.c | 64 |
1 file changed, 10 insertions, 54 deletions
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index ea04f09f9d4d3f..b133c60808fe01 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -24,23 +24,13 @@ unsigned long arch_ftrace_get_symaddr(unsigned long fentry_ip) return fentry_ip - MCOUNT_AUIPC_SIZE; } -void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex) +void arch_ftrace_update_code(int command) { mutex_lock(&text_mutex); - - /* - * The code sequences we use for ftrace can't be patched while the - * kernel is running, so we need to use stop_machine() to modify them - * for now. This doesn't play nice with text_mutex, we use this flag - * to elide the check. - */ - riscv_patch_in_stop_machine = true; -} - -void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex) -{ - riscv_patch_in_stop_machine = false; + command |= FTRACE_MAY_SLEEP; + ftrace_modify_all_code(command); mutex_unlock(&text_mutex); + flush_icache_all(); } static int __ftrace_modify_call(unsigned long source, unsigned long target, bool validate) @@ -129,51 +119,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func) * before the write to function_trace_op later in the generic ftrace. * If the sequence is not enforced, then an old ftrace_call_dest may * race loading a new function_trace_op set in ftrace_modify_all_code - * - * If we are in stop_machine, then we don't need to call remote fence - * as there is no concurrent read-side of ftrace_call_dest. 
*/ smp_wmb(); - if (!irqs_disabled()) - smp_call_function(ftrace_sync_ipi, NULL, 1); - return 0; -} - -struct ftrace_modify_param { - int command; - atomic_t cpu_count; -}; - -static int __ftrace_modify_code(void *data) -{ - struct ftrace_modify_param *param = data; - - if (atomic_inc_return(&param->cpu_count) == num_online_cpus()) { - ftrace_modify_all_code(param->command); - /* - * Make sure the patching store is effective *before* we - * increment the counter which releases all waiting CPUs - * by using the release variant of atomic increment. The - * release pairs with the call to local_flush_icache_all() - * on the waiting CPU. - */ - atomic_inc_return_release(&param->cpu_count); - } else { - while (atomic_read(&param->cpu_count) <= num_online_cpus()) - cpu_relax(); - - local_flush_icache_all(); - } - + /* + * Updating ftrace does not take stop_machine path, so irqs should not + * be disabled. + */ + WARN_ON(irqs_disabled()); + smp_call_function(ftrace_sync_ipi, NULL, 1); return 0; } -void arch_ftrace_update_code(int command) -{ - struct ftrace_modify_param param = { command, ATOMIC_INIT(0) }; - - stop_machine(__ftrace_modify_code, &param, cpu_online_mask); -} #else /* CONFIG_DYNAMIC_FTRACE */ unsigned long ftrace_call_adjust(unsigned long addr) { |