sparc64: Add HAVE_FUNCTION_TRACE_MCOUNT_TEST and tidy up.
author     David S. Miller <davem@davemloft.net>
           Tue, 13 Apr 2010 05:35:24 +0000 (22:35 -0700)
committer  David S. Miller <davem@davemloft.net>
           Tue, 13 Apr 2010 05:35:24 +0000 (22:35 -0700)
Check function_trace_stop at ftrace_caller.

Toss mcount_call and the dummy call to ftrace_stub; they are unnecessary.

Document problems we'll have if the final kernel image link
ever turns on relaxation.

Properly size 'ftrace_call' so it looks right when inspecting
instructions under gdb et al.

Signed-off-by: David S. Miller <davem@davemloft.net>
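
Background note: selecting HAVE_FUNCTION_TRACE_MCOUNT_TEST tells the generic
ftrace code that the architecture's mcount/ftrace_caller stubs test
function_trace_stop themselves, so the core no longer needs to interpose its
own C-level check on every traced call.  As a rough sketch (not part of this
patch, and only approximating the kernel/trace/ftrace.c fallback of this era),
the generic wrapper used when an arch does not select the option looks
something like this:

    /* Approximate generic fallback: when the arch does not test
     * function_trace_stop in its mcount assembly, every traced call is
     * routed through this C wrapper before the real tracer runs.
     */
    static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
    {
            if (function_trace_stop)
                    return;

            __ftrace_trace_function(ip, parent_ip);
    }

With the select added below, sparc64 performs the equivalent load-and-branch
directly in mcount and ftrace_caller, which is what the mcount.S hunks
implement.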
arch/sparc/Kconfig
arch/sparc/lib/mcount.S

diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6db5136..035304c 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -37,6 +37,7 @@ config SPARC64
        def_bool 64BIT
        select ARCH_SUPPORTS_MSI
        select HAVE_FUNCTION_TRACER
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_LMB
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S
index 24b8b12..7047997 100644
--- a/arch/sparc/lib/mcount.S
+++ b/arch/sparc/lib/mcount.S
@@ -96,13 +96,12 @@ mcount:
 #endif
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
-       mov             %o7, %o0
-       .globl          mcount_call
-mcount_call:
-       call            ftrace_stub
-        mov            %o0, %o7
+       /* Do nothing, the retl/nop below is all we need.  */
 #else
-       sethi           %hi(ftrace_trace_function), %g1
+       sethi           %hi(function_trace_stop), %g1
+       lduw            [%g1 + %lo(function_trace_stop)], %g2
+       brnz,pn         %g2, 1f
+        sethi          %hi(ftrace_trace_function), %g1
        sethi           %hi(ftrace_stub), %g2
        ldx             [%g1 + %lo(ftrace_trace_function)], %g1
        or              %g2, %lo(ftrace_stub), %g2
@@ -131,14 +130,23 @@ ftrace_stub:
        .globl          ftrace_caller
        .type           ftrace_caller,#function
 ftrace_caller:
+       sethi           %hi(function_trace_stop), %g1
        mov             %i7, %o1
-       mov             %o7, %o0
+       lduw            [%g1 + %lo(function_trace_stop)], %g2
+       brnz,pn         %g2, ftrace_stub
+        mov            %o7, %o0
        .globl          ftrace_call
 ftrace_call:
+       /* If the final kernel link ever turns on relaxation, we'll need
+        * to do something about this tail call.  Otherwise the linker
+        * will rewrite the call into a branch and nop out the move
+        * instruction.
+        */
        call            ftrace_stub
         mov            %o0, %o7
        retl
         nop
+       .size           ftrace_call,.-ftrace_call
        .size           ftrace_caller,.-ftrace_caller
 #endif
 #endif
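
Note on the relaxation comment added above: with CONFIG_DYNAMIC_FTRACE, the
instruction word at the ftrace_call label is rewritten at runtime to call the
live tracer, and the patching code expects to find a SPARC "call" instruction
at that address.  If the final kernel link ever relaxed the "call ftrace_stub"
into a branch and nop'd the delay-slot move, the bytes at ftrace_call would no
longer match what the patcher expects to replace.  The following is an
illustration only (not taken from this patch) of how such a call word is
encoded; the helper name sparc_call_insn is hypothetical:

    /* A SPARC "call" targeting 'addr' from patch site 'ip': opcode 01 in
     * the top two bits plus a signed 30-bit word displacement.  Dynamic
     * ftrace rewrites the word at ftrace_call with an encoding of this
     * form, so a linker-relaxed branch there would break the patching.
     */
    #include <stdint.h>

    static uint32_t sparc_call_insn(unsigned long ip, unsigned long addr)
    {
            int32_t disp = (int32_t)(addr - ip);    /* byte offset from the call site */

            return 0x40000000U | (((uint32_t)disp >> 2) & 0x3fffffffU);
    }

The new ".size ftrace_call" directive simply gives that patch site a proper
symbol size so the instruction shows up sensibly when disassembled in gdb.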