From cfce092dae95ed81391e49f273353a96cc6dec64 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 22 Nov 2022 16:36:24 +0000
Subject: [PATCH] ftrace: arm64: remove static ftrace

The build test robot pointed out that there's a build failure when:

  CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
  CONFIG_DYNAMIC_FTRACE_WITH_ARGS=n

... due to some mismatched ifdeffery, some of which checks
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS, and some of which checks
CONFIG_DYNAMIC_FTRACE_WITH_ARGS, leading to some missing definitions expected
by the core code when CONFIG_DYNAMIC_FTRACE=n and consequently
CONFIG_DYNAMIC_FTRACE_WITH_ARGS=n.

There's really not much point in supporting CONFIG_DYNAMIC_FTRACE=n (AKA
static ftrace). All supported toolchains allow us to implement
DYNAMIC_FTRACE, distributions all prefer DYNAMIC_FTRACE, and both powerpc
and s390 removed support for static ftrace in commits:

  0c0c52306f4792a4 ("powerpc: Only support DYNAMIC_FTRACE not static")
  5d6a0163494c78ad ("s390/ftrace: enforce DYNAMIC_FTRACE if FUNCTION_TRACER is selected")

... and according to Steven, static ftrace is only supported on x86 to allow
testing that the core code still functions in this configuration.

Given that, let's simplify matters by removing arm64's support for static
ftrace. This avoids the problem originally reported, and leaves us with less
code to maintain.

Fixes: 26299b3f6ba2 ("ftrace: arm64: move from REGS to ARGS")
Link: https://lore.kernel.org/r/202211212249.livTPi3Y-lkp@intel.com
Suggested-by: Steven Rostedt
Signed-off-by: Mark Rutland
Cc: Will Deacon
Cc: Catalin Marinas
Acked-by: Steven Rostedt (Google)
Link: https://lore.kernel.org/r/20221122163624.1225912-1-mark.rutland@arm.com
Signed-off-by: Will Deacon
---
 arch/arm64/Kconfig               |  1 +
 arch/arm64/kernel/entry-ftrace.S | 39 ---------------------------------------
 arch/arm64/kernel/ftrace.c       |  5 -----
 3 files changed, 1 insertion(+), 44 deletions(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b6b3305..5553a73 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -117,6 +117,7 @@ config ARM64
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select CRC32
 	select DCACHE_WORD_ACCESS
+	select DYNAMIC_FTRACE if FUNCTION_TRACER
 	select DMA_DIRECT_REMAP
 	select EDAC_SUPPORT
 	select FRAME_POINTER
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 4d30505..30cc2a9 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -170,44 +170,6 @@ SYM_CODE_END(ftrace_caller)
 	add	\reg, \reg, #8
 	.endm
 
-#ifndef CONFIG_DYNAMIC_FTRACE
-/*
- * void _mcount(unsigned long return_address)
- * @return_address: return address to instrumented function
- *
- * This function makes calls, if enabled, to:
- *     - tracer function to probe instrumented function's entry,
- *     - ftrace_graph_caller to set up an exit hook
- */
-SYM_FUNC_START(_mcount)
-	mcount_enter
-
-	ldr_l	x2, ftrace_trace_function
-	adr	x0, ftrace_stub
-	cmp	x0, x2			// if (ftrace_trace_function
-	b.eq	skip_ftrace_call	//     != ftrace_stub) {
-
-	mcount_get_pc	x0		//     function's pc
-	mcount_get_lr	x1		//     function's lr (= parent's pc)
-	blr	x2			//     (*ftrace_trace_function)(pc, lr);
-
-skip_ftrace_call:			// }
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	ldr_l	x2, ftrace_graph_return
-	cmp	x0, x2			//     if ((ftrace_graph_return
-	b.ne	ftrace_graph_caller	//          != ftrace_stub)
-
-	ldr_l	x2, ftrace_graph_entry	//       || (ftrace_graph_entry
-	adr_l	x0, ftrace_graph_entry_stub //       != ftrace_graph_entry_stub))
-	cmp	x0, x2
-	b.ne	ftrace_graph_caller	//     ftrace_graph_caller();
-#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-	mcount_exit
-SYM_FUNC_END(_mcount)
-EXPORT_SYMBOL(_mcount)
-NOKPROBE(_mcount)
-
-#else /* CONFIG_DYNAMIC_FTRACE */
 /*
  * _mcount() is used to build the kernel with -pg option, but all the branch
  * instructions to _mcount() are replaced to NOP initially at kernel start up,
@@ -247,7 +209,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
 
 	mcount_exit
 SYM_FUNC_END(ftrace_caller)
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 5cf990d..b30b955 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -60,7 +60,6 @@ int ftrace_regs_query_register_offset(const char *name)
 }
 #endif
 
-#ifdef CONFIG_DYNAMIC_FTRACE
 /*
  * Replace a single instruction, which may be a branch or NOP.
  * If @validate == true, a replaced instruction is checked against 'old'.
@@ -268,7 +267,6 @@ void arch_ftrace_update_code(int command)
 	command |= FTRACE_MAY_SLEEP;
 	ftrace_modify_all_code(command);
 }
-#endif /* CONFIG_DYNAMIC_FTRACE */
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -299,8 +297,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
 	}
 }
 
-#ifdef CONFIG_DYNAMIC_FTRACE
-
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
 		       struct ftrace_ops *op, struct ftrace_regs *fregs)
@@ -338,5 +334,4 @@ int ftrace_disable_ftrace_graph_caller(void)
 	return ftrace_modify_graph_caller(false);
 }
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
-#endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
-- 
2.7.4
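
To make the reported failure mode concrete, the mismatch described in the
commit message reduces to roughly the following self-contained sketch. The
names here (arch_sample_regs(), core_use(), sketch.c) are hypothetical
stand-ins, not the actual arm64 or ftrace core identifiers; only the shape of
the ifdeffery mirrors the report.

  /*
   * Minimal sketch of a HAVE_/non-HAVE_ ifdeffery mismatch. The "arch" side
   * provides a helper only when the feature is actually enabled, while the
   * "core" side is gated on the capability symbol instead.
   */

  /* "arch" side: compiled out when CONFIG_DYNAMIC_FTRACE_WITH_ARGS=n */
  #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
  static inline unsigned long arch_sample_regs(void)
  {
  	return 0;
  }
  #endif

  /* "core" side: still built whenever the capability symbol is set */
  #ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
  unsigned long core_use(void)
  {
  	/* breaks the build: arch_sample_regs() was never defined */
  	return arch_sample_regs();
  }
  #endif

Building this with only the capability symbol defined, e.g.

  cc -DCONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=1 -c sketch.c

which corresponds to CONFIG_DYNAMIC_FTRACE=n (and hence
CONFIG_DYNAMIC_FTRACE_WITH_ARGS=n), fails because core_use() references a
definition that was compiled out, which is the class of breakage the patch
avoids by always selecting DYNAMIC_FTRACE when FUNCTION_TRACER is enabled.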