unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh;
-/* We need to change this state when a selftest is running.
+/*
+ * We need to change this state when a selftest is running.
* A selftest will look into the ring buffer to count the
* entries inserted during the selftest, although concurrent
* insertions into the ring buffer, such as ftrace_printk, could
* occur at the same time, giving false positive or negative results.
*/
-static atomic_t tracing_selftest_running = ATOMIC_INIT(0);
+static bool __read_mostly tracing_selftest_running;
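Why a plain bool suffices here, as a minimal sketch rather than the kernel code itself: the flag is only written while trace_types_lock is held, and the lone lockless reader in ftrace_printk only needs a best-effort answer, so the atomic_t bought no extra ordering. All names below (demo_lock, demo_selftest_running, demo_run_selftest, demo_printk_path) are hypothetical stand-ins.

#include <linux/cache.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_lock);			/* stands in for trace_types_lock */
static bool __read_mostly demo_selftest_running;

static void demo_run_selftest(void)
{
	mutex_lock(&demo_lock);
	demo_selftest_running = true;	/* written only under the mutex */

	/* ... run the selftest and count ring-buffer entries ... */

	demo_selftest_running = false;
	mutex_unlock(&demo_lock);
}

static int demo_printk_path(void)
{
	/*
	 * Lockless, best-effort read: atomic_read() on the old
	 * atomic_t gave no stronger guarantee than this plain load.
	 */
	if (demo_selftest_running)
		return 0;

	/* ... insert the entry into the ring buffer ... */
	return 1;
}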
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
unlock_kernel();
mutex_lock(&trace_types_lock);
+ tracing_selftest_running = true;
+
for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
struct trace_array *tr = &global_trace;
int i;
- atomic_set(&tracing_selftest_running, 1);
/*
* Run a selftest on this tracer.
* Here we reset the trace buffer, and set the current
/* the test is responsible for initializing and enabling */
pr_info("Testing tracer %s: ", type->name);
ret = type->selftest(type, tr);
- atomic_set(&tracing_selftest_running, 0);
/* the test is responsible for resetting too */
current_trace = saved_tracer;
if (ret) {
max_tracer_type_len = len;
out:
+ tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);
lock_kernel();
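A small point about placement, hedged since most of register_tracer() is elided from this hunk: clearing the flag at the out: label (rather than right after the selftest call) means every exit path that jumps to out, including the early "already found" error path above, drops trace_types_lock with tracing_selftest_running already false. A hypothetical skeleton of that shape, reusing the stand-in names from the earlier sketch; demo_name_taken() and demo_selftest_for() are made-up helpers:

static int demo_register(const char *name)
{
	int ret = 0;

	mutex_lock(&demo_lock);
	demo_selftest_running = true;

	if (demo_name_taken(name)) {		/* hypothetical helper */
		ret = -1;
		goto out;			/* flag is still cleared below */
	}

	ret = demo_selftest_for(name);		/* hypothetical helper */
out:
	demo_selftest_running = false;
	mutex_unlock(&demo_lock);
	return ret;
}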
unsigned long flags, irq_flags;
int cpu, len = 0, size, pc;
- if (tracing_disabled || atomic_read(&tracing_selftest_running))
+ if (tracing_disabled || tracing_selftest_running)
return 0;
pc = preempt_count();