unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
+/*
+ * We need to change this state when a selftest is running.
+ * A selftest will peek into the ring buffer to count the
+ * entries inserted during the selftest, but concurrent
+ * insertions into the ring buffer, such as from ftrace_printk,
+ * could occur at the same time, giving false positive or
+ * negative results.
+ */
+static atomic_t tracing_selftest_running = ATOMIC_INIT(0);
+
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
{ }
struct tracer *saved_tracer = current_trace;
struct trace_array *tr = &global_trace;
int i;
+
+ atomic_set(&tracing_selftest_running, 1);
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
+ atomic_set(&tracing_selftest_running, 0);
/* the test is responsible for resetting too */
current_trace = saved_tracer;
if (ret) {
unsigned long flags, irq_flags;
int cpu, len = 0, size, pc;
- if (tracing_disabled)
+ if (tracing_disabled || atomic_read(&tracing_selftest_running))
return 0;
pc = preempt_count();
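
For reference, the guard pattern this patch introduces can be sketched
outside the kernel. The following is a minimal, self-contained C11 sketch,
not kernel code: the names selftest_running, buffer_write, run_selftest,
and entries are hypothetical stand-ins for tracing_selftest_running, the
printk-style writer path, the selftest hook, and the ring buffer's entry
count. It shows a writer dropping events while the selftest flag is set,
so the selftest's count is not skewed by concurrent insertions.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's flag and entry count. */
static atomic_int selftest_running = 0;	/* mirrors tracing_selftest_running */
static int entries;			/* mirrors the ring buffer's entry count */

/*
 * A writer on the ftrace_printk-like path: skip insertion while a
 * selftest runs, just as the patched check returns 0 early.
 */
static int buffer_write(const char *msg)
{
	if (atomic_load(&selftest_running))
		return 0;		/* dropped while the selftest runs */
	entries++;
	printf("wrote: %s\n", msg);
	return 1;
}

/*
 * The selftest: raise the flag, count only the entries the tracer
 * itself inserts, then clear the flag, as register_tracer does around
 * type->selftest().
 */
static void run_selftest(void)
{
	atomic_store(&selftest_running, 1);
	int before = entries;
	/* ... the tracer inserts a known number of entries here ... */
	atomic_store(&selftest_running, 0);
	printf("selftest saw %d entries\n", entries - before);
}

int main(void)
{
	buffer_write("normal event");	/* inserted */
	run_selftest();			/* concurrent buffer_write() calls
					 * would be dropped in this window */
	buffer_write("after selftest");	/* inserted again */
	return 0;
}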