FTRACE_OPS_FL_PID = 1 << 14,
FTRACE_OPS_FL_RCU = 1 << 15,
FTRACE_OPS_FL_TRACE_ARRAY = 1 << 16,
+ FTRACE_OPS_FL_IPIPE_EXCLUSIVE = 1 << 17,
};
#ifdef CONFIG_DYNAMIC_FTRACE
bool "enable/disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
+ depends on !IPIPE
default y
help
This option will modify all the calls to function tracing
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
+#include <linux/ipipe.h>
#include <trace/events/sched.h>
static void update_ftrace_function(void)
{
+ struct ftrace_ops *ops;
ftrace_func_t func;
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next)
+ if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) {
+ set_function_trace_op = ops;
+ func = ops->func;
+ goto set_pointers;
+ }
+
/*
* Prepare the ftrace_ops that the arch callback will use.
* If there's only one ftrace_ops registered, the ftrace_ops_list
update_function_graph_func();
+ set_pointers:
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
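
The FTRACE_OPS_FL_IPIPE_EXCLUSIVE flag added above lets a single client claim the tracing entry point outright: update_ftrace_function() now short-circuits to the first ops carrying the flag instead of building the usual list dispatch. A minimal sketch of how a client could use this, assuming the 4.x four-argument ftrace_func_t callback; the callback and ops names are purely illustrative:

#include <linux/ftrace.h>

/* Illustrative only: an exclusive callback that is dispatched instead of
 * the regular ftrace_ops chain once the new flag is honoured. */
static void demo_exclusive_func(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* tracing work goes here */
}

static struct ftrace_ops demo_exclusive_ops = {
	.func	= demo_exclusive_func,
	.flags	= FTRACE_OPS_FL_IPIPE_EXCLUSIVE,
};

static int __init demo_exclusive_init(void)
{
	/* registration funnels into update_ftrace_function() above */
	return register_ftrace_function(&demo_exclusive_ops);
}
device_initcall(demo_exclusive_init);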
static void ftrace_run_update_code(int command)
{
+#ifdef CONFIG_IPIPE
+ unsigned long flags;
+#endif /* CONFIG_IPIPE */
int ret;
ret = ftrace_arch_code_modify_prepare();
* is safe. The stop_machine() is the safest, but also
* produces the most overhead.
*/
+#ifdef CONFIG_IPIPE
+ flags = ipipe_critical_enter(NULL);
+ __ftrace_modify_code(&command);
+ ipipe_critical_exit(flags);
+#else /* !CONFIG_IPIPE */
arch_ftrace_update_code(command);
+#endif /* !CONFIG_IPIPE */
ret = ftrace_arch_code_modify_post_process();
FTRACE_WARN_ON(ret);
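
Under CONFIG_IPIPE the code patching no longer goes through arch_ftrace_update_code() and its stop_machine() path: ipipe_critical_enter() parks the other CPUs with hard interrupts off and hands back the flags expected by ipipe_critical_exit(). A sketch of that general pattern, assuming the usual I-pipe prototypes unsigned long ipipe_critical_enter(void (*syncfn)(void)) and void ipipe_critical_exit(unsigned long flags):

#include <linux/ipipe.h>

/* Illustrative pattern only: invoke patch_one_site() while the other CPUs
 * spin inside the I-pipe critical section with hard interrupts disabled. */
static void demo_patch_text(void (*patch_one_site)(void))
{
	unsigned long flags;

	flags = ipipe_critical_enter(NULL);	/* NULL: no per-CPU sync handler */
	patch_one_site();			/* safe to rewrite kernel text here */
	ipipe_critical_exit(flags);		/* release the CPUs, restore IRQ state */
}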
* reason to cause large interrupt latencies while we do it.
*/
if (!mod)
- local_irq_save(flags);
+ flags = hard_local_irq_save();
ftrace_update_code(mod, start_pg);
if (!mod)
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
ret = 0;
out:
mutex_unlock(&ftrace_lock);
unsigned long count, flags;
int ret;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
ret = ftrace_dyn_arch_init();
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
+
+ /* a non-zero return from ftrace_dyn_arch_init() means the arch init failed */
if (ret)
goto failed;
}
} while_for_each_ftrace_op(op);
out:
- preempt_enable_notrace();
+#ifdef CONFIG_IPIPE
+ if (hard_irqs_disabled() || !__ipipe_root_p)
+ /*
+ * Nothing urgent to schedule here. At latest the timer tick
+ * will pick up whatever the tracing functions kicked off.
+ */
+ preempt_enable_no_resched_notrace();
+ else
+#endif
+ preempt_enable_notrace();
trace_clear_recursion(bit);
}
static __always_inline int
trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
- unsigned int val = cpu_buffer->current_context;
+ unsigned long flags;
+ unsigned int val;
int bit;
if (in_interrupt()) {
} else
bit = RB_CTX_NORMAL;
- if (unlikely(val & (1 << bit)))
+ flags = hard_local_irq_save();
+
+ val = cpu_buffer->current_context;
+ if (unlikely(val & (1 << bit))) {
+ hard_local_irq_restore(flags);
return 1;
+ }
val |= (1 << bit);
cpu_buffer->current_context = val;
+ hard_local_irq_restore(flags);
+
return 0;
}
static __always_inline void
trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
cpu_buffer->current_context &= cpu_buffer->current_context - 1;
+ hard_local_irq_restore(flags);
}
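
With I-pipe, a head-domain interrupt can still preempt a region protected only by the virtual local_irq state, so the read-modify-write of current_context above is bracketed with hard_local_irq_save()/restore(). The unlock itself relies on a bit trick: deeper contexts (NMI, IRQ, softirq) use lower bit numbers than normal context, so clearing the lowest set bit always drops the innermost nesting level. A standalone illustration, with the RB_CTX_* values mirrored from the local enum in ring_buffer.c:

/* Mirrors the private enum in kernel/trace/ring_buffer.c. */
enum { RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, RB_CTX_NORMAL, RB_CTX_MAX };

static unsigned int demo_drop_innermost(void)
{
	/* a NORMAL-context write was interrupted by an IRQ-context write */
	unsigned int ctx = (1 << RB_CTX_NORMAL) | (1 << RB_CTX_IRQ);

	ctx &= ctx - 1;	/* clears 1 << RB_CTX_IRQ, the lowest set bit */

	return ctx;	/* == 1 << RB_CTX_NORMAL: only the outer context remains */
}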
/**
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
+ flags = hard_local_irq_save();
+
pc = preempt_count();
- preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out;
- local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->trace_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
put_trace_buf();
out_nobuffer:
- preempt_enable_notrace();
+ hard_local_irq_restore(flags);
unpause_graph_tracing();
return len;
int this_cpu;
u64 now;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
this_cpu = raw_smp_processor_id();
now = sched_clock_cpu(this_cpu);
arch_spin_unlock(&trace_clock_struct.lock);
out:
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
return now;
}
* Need to use raw, since this must be called before the
* recursive protection is performed.
*/
- local_irq_save(flags);
+ flags = hard_local_irq_save();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
}
atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
if (tracing_thresh)
return 1;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
}
atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
return ret;
}
int cpu;
int pc;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
__trace_graph_return(tr, trace, flags, pc);
}
atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
}
void set_graph_array(struct trace_array *tr)
*/
void trace_hardirqs_on(void)
{
- if (!preempt_trace() && irq_trace())
+ if (ipipe_root_p && !preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_on);
void trace_hardirqs_off(void)
{
- if (!preempt_trace() && irq_trace())
+ if (ipipe_root_p && !preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL(trace_hardirqs_off);
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
- if (!preempt_trace() && irq_trace())
+ if (ipipe_root_p && !preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
- if (!preempt_trace() && irq_trace())
+ if (ipipe_root_p && !preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
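
The ipipe_root_p tests added above confine the irqsoff bookkeeping to the root (Linux) domain: these hooks only make sense for the Linux-virtualized interrupt state, so the critical-section timing is skipped whenever the CPU is running over a head (out-of-band) domain. A sketch of the guard pattern; the helper name is hypothetical:

#include <linux/ipipe.h>

/* Hypothetical helper: only account for hardirq on/off transitions when
 * the CPU is currently running over the root (Linux) domain. */
static inline bool irqsoff_trace_applicable(void)
{
	return ipipe_root_p;
}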