static_call: Add call depth tracking support
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 15 Sep 2022 11:11:31 +0000 (13:11 +0200)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Mon, 17 Oct 2022 14:41:16 +0000 (16:41 +0200)
When indirect calls are switched to direct calls, the call target has to be
the call thunk rather than the function itself when call depth tracking is
enabled. But static calls are patched before the call thunks have been set
up.

Ensure a second run through the static call patching code after call thunks
have been created. When call thunks are not enabled this has no side
effects.
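
To make the boot flow concrete, here is a minimal userspace model of the
mechanism (an illustration only, not kernel code: init_pass(), translate(),
thunks_ready etc. are made-up stand-ins for static_call_init(),
callthunks_translate_call_dest() and the thunk setup):

  /* build: cc -Wall -o reinit-demo reinit-demo.c && ./reinit-demo */
  #include <stdio.h>
  #include <stdbool.h>

  /* 0 = never initialized, 1 = up to date, >1 = re-initialization requested */
  static int  initialized;
  static bool thunks_ready;          /* set once "call thunks" exist */
  static bool notifier_registered;   /* one-shot, like the module notifier */

  /* Shape of callthunks_translate_call_dest(): keep the original target
   * until thunks exist, then redirect the call to the thunk. */
  static const char *translate(const char *dest)
  {
          return thunks_ready ? "func_thunk" : dest;
  }

  /* Shape of static_call_init(): skip when state is exactly 1, otherwise
   * (re)patch the sites; the notifier is registered only on the first pass. */
  static void init_pass(void)
  {
          if (initialized == 1)
                  return;

          printf("patching site: call %s\n", translate("func"));

          if (!initialized) {
                  notifier_registered = true;
                  printf("registering module notifier\n");
          }
          initialized = 1;
  }

  /* Shape of static_call_force_reinit(): only valid after the first pass. */
  static void force_reinit(void)
  {
          if (!initialized)
                  return;
          initialized++;
  }

  int main(void)
  {
          init_pass();     /* early: thunks not ready, sites call func directly */
          thunks_ready = true;
          force_reinit();  /* callthunks_patch_builtin_calls() requests a redo  */
          init_pass();     /* early_initcall(): sites now call the thunk        */
          init_pass();     /* further calls are no-ops                          */
          return notifier_registered ? 0 : 1;
  }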

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220915111148.306100465@infradead.org
arch/x86/include/asm/alternative.h
arch/x86/kernel/callthunks.c
arch/x86/kernel/static_call.c
include/linux/static_call.h
kernel/static_call_inline.c

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 4c416b21bac80693902d34df351b2212f54a56ab..07ac25793a3f493701ec09fe7d5ce38c7b7f08d0 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -91,11 +91,16 @@ struct callthunk_sites {
 extern void callthunks_patch_builtin_calls(void);
 extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
                                          struct module *mod);
+extern void *callthunks_translate_call_dest(void *dest);
 #else
 static __always_inline void callthunks_patch_builtin_calls(void) {}
 static __always_inline void
 callthunks_patch_module_calls(struct callthunk_sites *sites,
                              struct module *mod) {}
+static __always_inline void *callthunks_translate_call_dest(void *dest)
+{
+       return dest;
+}
 #endif
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index dfe7ffff88b9323e64cad8f98f6110e267d5ff05..071003605a865b006989bbda0a1d864d618d39d3 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -6,6 +6,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
 #include <linux/moduleloader.h>
+#include <linux/static_call.h>
 
 #include <asm/alternative.h>
 #include <asm/asm-offsets.h>
@@ -271,10 +272,27 @@ void __init callthunks_patch_builtin_calls(void)
        pr_info("Setting up call depth tracking\n");
        mutex_lock(&text_mutex);
        callthunks_setup(&cs, &builtin_coretext);
+       static_call_force_reinit();
        thunks_initialized = true;
        mutex_unlock(&text_mutex);
 }
 
+void *callthunks_translate_call_dest(void *dest)
+{
+       void *target;
+
+       lockdep_assert_held(&text_mutex);
+
+       if (!thunks_initialized || skip_addr(dest))
+               return dest;
+
+       if (!is_coretext(NULL, dest))
+               return dest;
+
+       target = patch_dest(dest, false);
+       return target ? : dest;
+}
+
 #ifdef CONFIG_MODULES
 void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
                                            struct module *mod)
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 5d3844a98373e2008b68acab77788e7ae48c94b4..2ebc338980bcdfe5a276ac79fbbe2a90ecc6815c 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -34,6 +34,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
 
        switch (type) {
        case CALL:
+               func = callthunks_translate_call_dest(func);
                code = text_gen_insn(CALL_INSN_OPCODE, insn, func);
                if (func == &__static_call_return0) {
                        emulate = code;
diff --git a/include/linux/static_call.h b/include/linux/static_call.h
index df53bed9d71f1de74af1410515ab91104b908909..141e6b176a1b308c89cedf3481ba7592c6d974a8 100644
--- a/include/linux/static_call.h
+++ b/include/linux/static_call.h
@@ -162,6 +162,8 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
 
 extern int __init static_call_init(void);
 
+extern void static_call_force_reinit(void);
+
 struct static_call_mod {
        struct static_call_mod *next;
        struct module *mod; /* for vmlinux, mod == NULL */
diff --git a/kernel/static_call_inline.c b/kernel/static_call_inline.c
index dc5665b628140ee9b0a841011a4ae495d2c9eee8..639397b5491ca0ff6eabf18d1431dd2ab9036686 100644
--- a/kernel/static_call_inline.c
+++ b/kernel/static_call_inline.c
@@ -15,7 +15,18 @@ extern struct static_call_site __start_static_call_sites[],
 extern struct static_call_tramp_key __start_static_call_tramp_key[],
                                    __stop_static_call_tramp_key[];
 
-static bool static_call_initialized;
+static int static_call_initialized;
+
+/*
+ * Must be called before early_initcall() to be effective.
+ */
+void static_call_force_reinit(void)
+{
+       if (WARN_ON_ONCE(!static_call_initialized))
+               return;
+
+       static_call_initialized++;
+}
 
 /* mutex to protect key modules/sites */
 static DEFINE_MUTEX(static_call_mutex);
@@ -475,7 +486,8 @@ int __init static_call_init(void)
 {
        int ret;
 
-       if (static_call_initialized)
+       /* See static_call_force_reinit(). */
+       if (static_call_initialized == 1)
                return 0;
 
        cpus_read_lock();
@@ -490,11 +502,12 @@ int __init static_call_init(void)
                BUG();
        }
 
-       static_call_initialized = true;
-
 #ifdef CONFIG_MODULES
-       register_module_notifier(&static_call_module_nb);
+       if (!static_call_initialized)
+               register_module_notifier(&static_call_module_nb);
 #endif
+
+       static_call_initialized = 1;
        return 0;
 }
 early_initcall(static_call_init);