ftrace: arm64: remove static ftrace
authorMark Rutland <mark.rutland@arm.com>
Tue, 22 Nov 2022 16:36:24 +0000 (16:36 +0000)
committerWill Deacon <will@kernel.org>
Fri, 25 Nov 2022 12:11:50 +0000 (12:11 +0000)
The build test robot pointed out that there's a build failure when:

  CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
  CONFIG_DYNAMIC_FTRACE_WITH_ARGS=n

... due to some mismatched ifdeffery, some of which checks
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS, and some of which checks
CONFIG_DYNAMIC_FTRACE_WITH_ARGS, leading to some missing definitions expected
by the core code when CONFIG_DYNAMIC_FTRACE=n and consequently
CONFIG_DYNAMIC_FTRACE_WITH_ARGS=n.

There's really not much point in supporting CONFIG_DYNAMIC_FTRACE=n (AKA
static ftrace). All supported toolchains allow us to implement
DYNAMIC_FTRACE, distributions all prefer DYNAMIC_FTRACE, and both
powerpc and s390 removed support for static ftrace in commits:

  0c0c52306f4792a4 ("powerpc: Only support DYNAMIC_FTRACE not static")
  5d6a0163494c78ad ("s390/ftrace: enforce DYNAMIC_FTRACE if FUNCTION_TRACER is selected")

... and according to Steven, static ftrace is only supported on x86 to
allow testing that the core code still functions in this configuration.

Given that, let's simplify matters by removing arm64's support for
static ftrace. This avoids the problem originally reported, and leaves
us with less code to maintain.

Fixes: 26299b3f6ba2 ("ftrace: arm64: move from REGS to ARGS")
Link: https://lore.kernel.org/r/202211212249.livTPi3Y-lkp@intel.com
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lore.kernel.org/r/20221122163624.1225912-1-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/Kconfig
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/ftrace.c

index b6b3305ba70137d1ae9e6c38ad26a2baa48019a3..5553a734123eb32b65ba29bccffb1c91f87c5977 100644 (file)
@@ -117,6 +117,7 @@ config ARM64
        select CPU_PM if (SUSPEND || CPU_IDLE)
        select CRC32
        select DCACHE_WORD_ACCESS
+       select DYNAMIC_FTRACE if FUNCTION_TRACER
        select DMA_DIRECT_REMAP
        select EDAC_SUPPORT
        select FRAME_POINTER
index 4d3050549aa6e1b9c09c626bf7461261b27862ee..30cc2a9d1757a6a7221dbcca839a9da75b81e42b 100644 (file)
@@ -170,44 +170,6 @@ SYM_CODE_END(ftrace_caller)
        add     \reg, \reg, #8
        .endm
 
-#ifndef CONFIG_DYNAMIC_FTRACE
-/*
- * void _mcount(unsigned long return_address)
- * @return_address: return address to instrumented function
- *
- * This function makes calls, if enabled, to:
- *     - tracer function to probe instrumented function's entry,
- *     - ftrace_graph_caller to set up an exit hook
- */
-SYM_FUNC_START(_mcount)
-       mcount_enter
-
-       ldr_l   x2, ftrace_trace_function
-       adr     x0, ftrace_stub
-       cmp     x0, x2                  // if (ftrace_trace_function
-       b.eq    skip_ftrace_call        //     != ftrace_stub) {
-
-       mcount_get_pc   x0              //       function's pc
-       mcount_get_lr   x1              //       function's lr (= parent's pc)
-       blr     x2                      //   (*ftrace_trace_function)(pc, lr);
-
-skip_ftrace_call:                      // }
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       ldr_l   x2, ftrace_graph_return
-       cmp     x0, x2                  //   if ((ftrace_graph_return
-       b.ne    ftrace_graph_caller     //        != ftrace_stub)
-
-       ldr_l   x2, ftrace_graph_entry  //     || (ftrace_graph_entry
-       adr_l   x0, ftrace_graph_entry_stub //     != ftrace_graph_entry_stub))
-       cmp     x0, x2
-       b.ne    ftrace_graph_caller     //     ftrace_graph_caller();
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-       mcount_exit
-SYM_FUNC_END(_mcount)
-EXPORT_SYMBOL(_mcount)
-NOKPROBE(_mcount)
-
-#else /* CONFIG_DYNAMIC_FTRACE */
 /*
  * _mcount() is used to build the kernel with -pg option, but all the branch
  * instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -247,7 +209,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
 
        mcount_exit
 SYM_FUNC_END(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
index 5cf990d052ba88c7fd04376870e22d92c8fd3b07..b30b955a892113413bc394b383630285f191f2ae 100644 (file)
@@ -60,7 +60,6 @@ int ftrace_regs_query_register_offset(const char *name)
 }
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
 /*
  * Replace a single instruction, which may be a branch or NOP.
  * If @validate == true, a replaced instruction is checked against 'old'.
@@ -268,7 +267,6 @@ void arch_ftrace_update_code(int command)
        command |= FTRACE_MAY_SLEEP;
        ftrace_modify_all_code(command);
 }
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -299,8 +297,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
        }
 }
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
                       struct ftrace_ops *op, struct ftrace_regs *fregs)
@@ -338,5 +334,4 @@ int ftrace_disable_ftrace_graph_caller(void)
        return ftrace_modify_graph_caller(false);
 }
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */