ftrace: ipipe: enable tracing from the head domain
author     Philippe Gerum <rpm@xenomai.org>
Sun, 3 Dec 2017 11:06:54 +0000 (12:06 +0100)
committer  Marek Szyprowski <m.szyprowski@samsung.com>
Fri, 27 Apr 2018 09:21:34 +0000 (11:21 +0200)
include/linux/ftrace.h
kernel/trace/Kconfig
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
kernel/trace/trace_functions.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_irqsoff.c

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e54d257983f28c4e395d9a7bf871652e7f89c3de..2b2f11f477e2f3d97ca2d0760e2004dac6d10107 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -141,6 +141,7 @@ enum {
        FTRACE_OPS_FL_PID                       = 1 << 14,
        FTRACE_OPS_FL_RCU                       = 1 << 15,
        FTRACE_OPS_FL_TRACE_ARRAY               = 1 << 16,
+       FTRACE_OPS_FL_IPIPE_EXCLUSIVE           = 1 << 17,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
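
For context: FTRACE_OPS_FL_IPIPE_EXCLUSIVE is meant to be set by a head-domain tracer on the ftrace_ops it registers; update_ftrace_function() (patched below in kernel/trace/ftrace.c) then bypasses the normal ops selection and hands every function-entry event to that single callback. A minimal registration sketch follows; the tracer name and callback body are hypothetical and not part of this patch.

#include <linux/ftrace.h>
#include <linux/init.h>

/* Hypothetical head-domain callback: it may run with hard interrupts
 * masked and outside the root domain, so it must not take Linux
 * spinlocks, allocate memory or wake tasks up. */
static void notrace ipipe_trace_call(unsigned long ip, unsigned long parent_ip,
				     struct ftrace_ops *op, struct pt_regs *regs)
{
	/* log ip/parent_ip into a lock-free per-cpu buffer */
}

static struct ftrace_ops ipipe_trace_ops = {
	.func	= ipipe_trace_call,
	.flags	= FTRACE_OPS_FL_IPIPE_EXCLUSIVE,
};

/* register_ftrace_function() ends up in update_ftrace_function(), which
 * will then route all traced functions to ipipe_trace_call(). */
static int __init ipipe_trace_init(void)
{
	return register_ftrace_function(&ipipe_trace_ops);
}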
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 434c840e2d82f64c127a5c13f1f6aa244544ed43..f87e0748f1fce151f8811c9f63fcdcbfa2007c88 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -480,6 +480,7 @@ config DYNAMIC_FTRACE
        bool "enable/disable function tracing dynamically"
        depends on FUNCTION_TRACER
        depends on HAVE_DYNAMIC_FTRACE
+       depends on !IPIPE
        default y
        help
          This option will modify all the calls to function tracing
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8319e09e15b945f14f9046edeb885e173ef26652..713cf843efb964bfa2c96a14fef86d1b28a48bbf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/hash.h>
 #include <linux/rcupdate.h>
+#include <linux/ipipe.h>
 
 #include <trace/events/sched.h>
 
@@ -271,8 +272,17 @@ static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
 
 static void update_ftrace_function(void)
 {
+       struct ftrace_ops *ops;
        ftrace_func_t func;
 
+       for (ops = ftrace_ops_list;
+            ops != &ftrace_list_end; ops = ops->next)
+               if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) {
+                       set_function_trace_op = ops;
+                       func = ops->func;
+                       goto set_pointers;
+               }
+
        /*
         * Prepare the ftrace_ops that the arch callback will use.
         * If there's only one ftrace_ops registered, the ftrace_ops_list
@@ -302,6 +312,7 @@ static void update_ftrace_function(void)
 
        update_function_graph_func();
 
+  set_pointers:
        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;
@@ -2689,6 +2700,9 @@ void __weak arch_ftrace_update_code(int command)
 
 static void ftrace_run_update_code(int command)
 {
+#ifdef CONFIG_IPIPE
+       unsigned long flags;
+#endif /* CONFIG_IPIPE */
        int ret;
 
        ret = ftrace_arch_code_modify_prepare();
@@ -2702,7 +2716,13 @@ static void ftrace_run_update_code(int command)
         * is safe. The stop_machine() is the safest, but also
         * produces the most overhead.
         */
+#ifdef CONFIG_IPIPE
+       flags = ipipe_critical_enter(NULL);
+       __ftrace_modify_code(&command);
+       ipipe_critical_exit(flags);
+#else  /* !CONFIG_IPIPE */
        arch_ftrace_update_code(command);
+#endif /* !CONFIG_IPIPE */
 
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
@@ -5661,10 +5681,10 @@ static int ftrace_process_locs(struct module *mod,
         * reason to cause large interrupt latencies while we do it.
         */
        if (!mod)
-               local_irq_save(flags);
+               flags = hard_local_irq_save();
        ftrace_update_code(mod, start_pg);
        if (!mod)
-               local_irq_restore(flags);
+               hard_local_irq_restore(flags);
        ret = 0;
  out:
        mutex_unlock(&ftrace_lock);
@@ -5917,9 +5937,11 @@ void __init ftrace_init(void)
        unsigned long count, flags;
        int ret;
 
-       local_irq_save(flags);
+       flags = hard_local_irq_save_notrace();
        ret = ftrace_dyn_arch_init();
-       local_irq_restore(flags);
+       hard_local_irq_restore_notrace(flags);
+
+       /* ftrace_dyn_arch_init() reports errors via its return value */
        if (ret)
                goto failed;
 
@@ -6075,7 +6097,16 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
                }
        } while_for_each_ftrace_op(op);
 out:
-       preempt_enable_notrace();
+#ifdef CONFIG_IPIPE
+       if (hard_irqs_disabled() || !__ipipe_root_p)
+               /*
+                * Nothing urgent to schedule here. At latest the timer tick
+                * will pick up whatever the tracing functions kicked off.
+                */
+               preempt_enable_no_resched_notrace();
+       else
+#endif
+               preempt_enable_notrace();
        trace_clear_recursion(bit);
 }
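
The local_irq_save()/restore() to hard_local_irq_save()/restore() conversions above and in the hunks below all follow one pattern: under CONFIG_IPIPE, local_irq_save() only virtually masks root-domain interrupts, so a section that must also exclude head-domain preemption has to mask interrupts in hardware. A usage sketch, with a hypothetical per-cpu record standing in for the real critical sections:

/* Sketch only: guard a per-cpu update against interrupts from any
 * domain.  Without CONFIG_IPIPE the hard_* helpers degrade to plain
 * local IRQ masking, so the cost is unchanged there. */
static void update_trace_slot(struct trace_slot *slot)	/* hypothetical type */
{
	unsigned long flags;

	flags = hard_local_irq_save();
	slot->hits++;		/* not preemptible by root- or head-domain IRQs */
	hard_local_irq_restore(flags);
}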
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0476a9372014763fba412ef9f459d8384f73ae94..a4455be1e51bc9c5432f6104d5aa814fede6ab58 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2579,7 +2579,8 @@ rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       unsigned int val = cpu_buffer->current_context;
+       unsigned long flags;
+       unsigned int val;
        int bit;
 
        if (in_interrupt()) {
@@ -2592,19 +2593,30 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
        } else
                bit = RB_CTX_NORMAL;
 
-       if (unlikely(val & (1 << bit)))
+       flags = hard_local_irq_save();
+
+       val = cpu_buffer->current_context;
+       if (unlikely(val & (1 << bit))) {
+               hard_local_irq_restore(flags);
                return 1;
+       }
 
        val |= (1 << bit);
        cpu_buffer->current_context = val;
 
+       hard_local_irq_restore(flags);
+
        return 0;
 }
 
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
+       unsigned long flags;
+
+       flags = hard_local_irq_save();
        cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+       hard_local_irq_restore(flags);
 }
 
 /**
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 76bcc80b893ebc15868defc6d6b0af253c07ad57..d13785d040de56ec66ca77c2e19e98f660d617b4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2910,8 +2910,9 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        /* Don't pollute graph traces with trace_vprintk internals */
        pause_graph_tracing();
 
+       flags = hard_local_irq_save();
+
        pc = preempt_count();
-       preempt_disable_notrace();
 
        tbuffer = get_trace_buf();
        if (!tbuffer) {
@@ -2924,7 +2925,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
        if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
                goto out;
 
-       local_save_flags(flags);
        size = sizeof(*entry) + sizeof(u32) * len;
        buffer = tr->trace_buffer.buffer;
        event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
@@ -2945,7 +2945,7 @@ out:
        put_trace_buf();
 
 out_nobuffer:
-       preempt_enable_notrace();
+       hard_local_irq_restore(flags);
        unpause_graph_tracing();
 
        return len;
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 5fdc779f411d83f5f47d5f501b0c0b8cc40facca..e722b05584471223c5b148bac0e56a15ac294c45 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -96,7 +96,7 @@ u64 notrace trace_clock_global(void)
        int this_cpu;
        u64 now;
 
-       local_irq_save(flags);
+       flags = hard_local_irq_save_notrace();
 
        this_cpu = raw_smp_processor_id();
        now = sched_clock_cpu(this_cpu);
@@ -122,7 +122,7 @@ u64 notrace trace_clock_global(void)
        arch_spin_unlock(&trace_clock_struct.lock);
 
  out:
-       local_irq_restore(flags);
+       hard_local_irq_restore_notrace(flags);
 
        return now;
 }
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 27f7ad12c4b1b11da86dd161468c1c11944dd8fb..d1db7ae6c68da686475acd897cf9eddc0b8e7bba 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -172,7 +172,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
         * Need to use raw, since this must be called before the
         * recursive protection is performed.
         */
-       local_irq_save(flags);
+       flags = hard_local_irq_save();
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
@@ -192,7 +192,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
        }
 
        atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       hard_local_irq_restore(flags);
 }
 
 static struct tracer_opt func_opts[] = {
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 23c0b0cb5fb95c9875fb35cbd0d22f027430343c..2356e8e5203fb89fc7044a427af7b0844e72618d 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -408,7 +408,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        if (tracing_thresh)
                return 1;
 
-       local_irq_save(flags);
+       flags = hard_local_irq_save_notrace();
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
@@ -420,7 +420,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        }
 
        atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       hard_local_irq_restore_notrace(flags);
 
        return ret;
 }
@@ -482,7 +482,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
        int cpu;
        int pc;
 
-       local_irq_save(flags);
+       flags = hard_local_irq_save_notrace();
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
@@ -491,7 +491,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
-       local_irq_restore(flags);
+       hard_local_irq_restore_notrace(flags);
 }
 
 void set_graph_array(struct trace_array *tr)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7758bc0617cb15d8731defbc67912b5eb46246c2..f2395c62a93fecb97c375c7db1b03116f0e3ae60 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -483,28 +483,28 @@ inline void print_irqtrace_events(struct task_struct *curr)
  */
 void trace_hardirqs_on(void)
 {
-       if (!preempt_trace() && irq_trace())
+       if (ipipe_root_p && !preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
-       if (!preempt_trace() && irq_trace())
+       if (ipipe_root_p && !preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
-       if (!preempt_trace() && irq_trace())
+       if (ipipe_root_p && !preempt_trace() && irq_trace())
                stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
-       if (!preempt_trace() && irq_trace())
+       if (ipipe_root_p && !preempt_trace() && irq_trace())
                start_critical_timing(CALLER_ADDR0, caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
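
The trace_irqsoff.c changes above gate the irqsoff hooks on ipipe_root_p, presumably because these hooks track the root domain's virtual interrupt state, which is not meaningful (and not safe to follow into start/stop_critical_timing()) while the CPU runs over the head domain. Purely as an illustration, the condition the four hooks now share could be factored like this:

/* Illustrative only, not part of the patch: the guard common to
 * trace_hardirqs_on/off() and their _caller() variants. */
static inline bool root_irq_trace_enabled(void)
{
	return ipipe_root_p && !preempt_trace() && irq_trace();
}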