RISC-V: Don't check text_mutex during stop_machine
authorConor Dooley <conor.dooley@microchip.com>
Fri, 3 Mar 2023 14:37:55 +0000 (14:37 +0000)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Mar 2023 07:50:29 +0000 (08:50 +0100)
[ Upstream commit 2a8db5ec4a28a0fce822d10224db9471a44b6925 ]

We're currently using stop_machine() to update ftrace & kprobes, which
means that the thread that takes text_mutex during setup may not be the same
as the thread that eventually patches the code.  This isn't actually a
race because the lock is still held (preventing any other concurrent
accesses) and there is only one thread running during stop_machine(),
but it does trigger a lockdep failure.

This patch just elides the lockdep check during stop_machine.

Fixes: c15ac4fd60d5 ("riscv/ftrace: Add dynamic function tracer support")
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Reported-by: Changbin Du <changbin.du@gmail.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
Link: https://lore.kernel.org/r/20230303143754.4005217-1-conor.dooley@microchip.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
arch/riscv/include/asm/ftrace.h
arch/riscv/include/asm/patch.h
arch/riscv/kernel/ftrace.c
arch/riscv/kernel/patch.c

index 9e73922..d47d87c 100644 (file)
@@ -109,6 +109,6 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
 #define ftrace_init_nop ftrace_init_nop
 #endif
 
-#endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #endif /* _ASM_RISCV_FTRACE_H */
index 9a7d734..98d9de0 100644 (file)
@@ -9,4 +9,6 @@
 int patch_text_nosync(void *addr, const void *insns, size_t len);
 int patch_text(void *addr, u32 insn);
 
+extern int riscv_patch_in_stop_machine;
+
 #endif /* _ASM_RISCV_PATCH_H */
index 5bff37a..03a6434 100644 (file)
 void ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
 {
        mutex_lock(&text_mutex);
+
+       /*
+        * The code sequences we use for ftrace can't be patched while the
+        * kernel is running, so we need to use stop_machine() to modify them
+        * for now.  This doesn't play nice with text_mutex, we use this flag
+        * to elide the check.
+        */
+       riscv_patch_in_stop_machine = true;
 }
 
 void ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
 {
+       riscv_patch_in_stop_machine = false;
        mutex_unlock(&text_mutex);
 }
 
@@ -107,9 +116,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
 {
        int out;
 
-       ftrace_arch_code_modify_prepare();
+       mutex_lock(&text_mutex);
        out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
-       ftrace_arch_code_modify_post_process();
+       mutex_unlock(&text_mutex);
 
        return out;
 }
index 765004b..e099961 100644 (file)
@@ -11,6 +11,7 @@
 #include <asm/kprobes.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/ftrace.h>
 #include <asm/patch.h>
 
 struct patch_insn {
@@ -19,6 +20,8 @@ struct patch_insn {
        atomic_t cpu_count;
 };
 
+int riscv_patch_in_stop_machine = false;
+
 #ifdef CONFIG_MMU
 /*
  * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
@@ -59,8 +62,15 @@ static int patch_insn_write(void *addr, const void *insn, size_t len)
         * Before reaching here, it was expected to lock the text_mutex
         * already, so we don't need to give another lock here and could
         * ensure that it was safe between each cores.
+        *
+        * We're currently using stop_machine() for ftrace & kprobes, and while
+        * that ensures text_mutex is held before installing the mappings it
+        * does not ensure text_mutex is held by the calling thread.  That's
+        * safe but triggers a lockdep failure, so just elide it for that
+        * specific case.
         */
-       lockdep_assert_held(&text_mutex);
+       if (!riscv_patch_in_stop_machine)
+               lockdep_assert_held(&text_mutex);
 
        if (across_pages)
                patch_map(addr + len, FIX_TEXT_POKE1);
@@ -121,13 +131,25 @@ NOKPROBE_SYMBOL(patch_text_cb);
 
 int patch_text(void *addr, u32 insn)
 {
+       int ret;
        struct patch_insn patch = {
                .addr = addr,
                .insn = insn,
                .cpu_count = ATOMIC_INIT(0),
        };
 
-       return stop_machine_cpuslocked(patch_text_cb,
-                                      &patch, cpu_online_mask);
+       /*
+        * kprobes takes text_mutex, before calling patch_text(), but as we
+        * call stop_machine(), the lockdep assertion in patch_insn_write()
+        * gets confused by the context in which the lock is taken.
+        * Instead, ensure the lock is held before calling stop_machine(), and
+        * set riscv_patch_in_stop_machine to skip the check in
+        * patch_insn_write().
+        */
+       lockdep_assert_held(&text_mutex);
+       riscv_patch_in_stop_machine = true;
+       ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
+       riscv_patch_in_stop_machine = false;
+       return ret;
 }
 NOKPROBE_SYMBOL(patch_text);