tracing: Disable snapshot buffer when stopping instance tracers
[platform/kernel/linux-starfive.git] / kernel / trace / trace.c
index abaaf51..5b2ba69 100644
@@ -2359,13 +2359,7 @@ int is_tracing_stopped(void)
        return global_trace.stop_count;
 }
 
-/**
- * tracing_start - quick start of the tracer
- *
- * If tracing is enabled but was stopped by tracing_stop,
- * this will start the tracer back up.
- */
-void tracing_start(void)
+static void tracing_start_tr(struct trace_array *tr)
 {
        struct trace_buffer *buffer;
        unsigned long flags;
@@ -2373,119 +2367,83 @@ void tracing_start(void)
        if (tracing_disabled)
                return;
 
-       raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-       if (--global_trace.stop_count) {
-               if (global_trace.stop_count < 0) {
+       raw_spin_lock_irqsave(&tr->start_lock, flags);
+       if (--tr->stop_count) {
+               if (WARN_ON_ONCE(tr->stop_count < 0)) {
                        /* Someone screwed up their debugging */
-                       WARN_ON_ONCE(1);
-                       global_trace.stop_count = 0;
+                       tr->stop_count = 0;
                }
                goto out;
        }
 
        /* Prevent the buffers from switching */
-       arch_spin_lock(&global_trace.max_lock);
+       arch_spin_lock(&tr->max_lock);
 
-       buffer = global_trace.array_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       buffer = global_trace.max_buffer.buffer;
+       buffer = tr->max_buffer.buffer;
        if (buffer)
                ring_buffer_record_enable(buffer);
 #endif
 
-       arch_spin_unlock(&global_trace.max_lock);
-
- out:
-       raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
-}
-
-static void tracing_start_tr(struct trace_array *tr)
-{
-       struct trace_buffer *buffer;
-       unsigned long flags;
-
-       if (tracing_disabled)
-               return;
-
-       /* If global, we need to also start the max tracer */
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-               return tracing_start();
-
-       raw_spin_lock_irqsave(&tr->start_lock, flags);
-
-       if (--tr->stop_count) {
-               if (tr->stop_count < 0) {
-                       /* Someone screwed up their debugging */
-                       WARN_ON_ONCE(1);
-                       tr->stop_count = 0;
-               }
-               goto out;
-       }
-
-       buffer = tr->array_buffer.buffer;
-       if (buffer)
-               ring_buffer_record_enable(buffer);
+       arch_spin_unlock(&tr->max_lock);
 
  out:
        raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
 /**
- * tracing_stop - quick stop of the tracer
+ * tracing_start - quick start of the tracer
  *
- * Light weight way to stop tracing. Use in conjunction with
- * tracing_start.
+ * If tracing is enabled but was stopped by tracing_stop,
+ * this will start the tracer back up.
  */
-void tracing_stop(void)
+void tracing_start(void)
+{
+       return tracing_start_tr(&global_trace);
+}
+
+static void tracing_stop_tr(struct trace_array *tr)
 {
        struct trace_buffer *buffer;
        unsigned long flags;
 
-       raw_spin_lock_irqsave(&global_trace.start_lock, flags);
-       if (global_trace.stop_count++)
+       raw_spin_lock_irqsave(&tr->start_lock, flags);
+       if (tr->stop_count++)
                goto out;
 
        /* Prevent the buffers from switching */
-       arch_spin_lock(&global_trace.max_lock);
+       arch_spin_lock(&tr->max_lock);
 
-       buffer = global_trace.array_buffer.buffer;
+       buffer = tr->array_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       buffer = global_trace.max_buffer.buffer;
+       buffer = tr->max_buffer.buffer;
        if (buffer)
                ring_buffer_record_disable(buffer);
 #endif
 
-       arch_spin_unlock(&global_trace.max_lock);
+       arch_spin_unlock(&tr->max_lock);
 
  out:
-       raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
+       raw_spin_unlock_irqrestore(&tr->start_lock, flags);
 }
 
-static void tracing_stop_tr(struct trace_array *tr)
+/**
+ * tracing_stop - quick stop of the tracer
+ *
+ * Light weight way to stop tracing. Use in conjunction with
+ * tracing_start.
+ */
+void tracing_stop(void)
 {
-       struct trace_buffer *buffer;
-       unsigned long flags;
-
-       /* If global, we need to also stop the max tracer */
-       if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
-               return tracing_stop();
-
-       raw_spin_lock_irqsave(&tr->start_lock, flags);
-       if (tr->stop_count++)
-               goto out;
-
-       buffer = tr->array_buffer.buffer;
-       if (buffer)
-               ring_buffer_record_disable(buffer);
-
- out:
-       raw_spin_unlock_irqrestore(&tr->start_lock, flags);
+       return tracing_stop_tr(&global_trace);
 }
 
 static int trace_save_cmdline(struct task_struct *tsk)
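
The hunk above folds the old global-only tracing_start()/tracing_stop() bodies into tracing_start_tr()/tracing_stop_tr(), so the global entry points become one-line wrappers around &global_trace and every trace_array instance gets the same treatment, including its max (snapshot) buffer. stop_count behaves as a nesting counter: each stop increments it, each start decrements it, and recording is only re-enabled once the count drops back to zero. A minimal, self-contained sketch of that counter semantics (plain user-space C with hypothetical names, not the kernel code itself):

    #include <stdio.h>

    struct sketch_tr {
            int stop_count;   /* nesting depth of outstanding stops */
            int recording;    /* stands in for ring_buffer_record_enable/disable */
    };

    static void sketch_stop(struct sketch_tr *tr)
    {
            if (tr->stop_count++)   /* already stopped by someone else */
                    return;
            tr->recording = 0;      /* first stop actually disables recording */
    }

    static void sketch_start(struct sketch_tr *tr)
    {
            if (--tr->stop_count) {
                    if (tr->stop_count < 0) /* unbalanced start: clamp */
                            tr->stop_count = 0;
                    return;
            }
            tr->recording = 1;      /* last start re-enables recording */
    }

    int main(void)
    {
            struct sketch_tr tr = { .stop_count = 0, .recording = 1 };

            sketch_stop(&tr);       /* e.g. a resize begins */
            sketch_stop(&tr);       /* nested stop from another path */
            sketch_start(&tr);      /* inner start: still stopped */
            printf("recording after inner start: %d\n", tr.recording);
            sketch_start(&tr);      /* outer start: recording resumes */
            printf("recording after outer start: %d\n", tr.recording);
            return 0;
    }
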
@@ -2769,8 +2727,11 @@ void trace_buffered_event_enable(void)
        for_each_tracing_cpu(cpu) {
                page = alloc_pages_node(cpu_to_node(cpu),
                                        GFP_KERNEL | __GFP_NORETRY, 0);
-               if (!page)
-                       goto failed;
+               /* This is just an optimization and can handle failures */
+               if (!page) {
+                       pr_err("Failed to allocate event buffer\n");
+                       break;
+               }
 
                event = page_address(page);
                memset(event, 0, sizeof(*event));
@@ -2784,10 +2745,6 @@ void trace_buffered_event_enable(void)
                        WARN_ON_ONCE(1);
                preempt_enable();
        }
-
-       return;
- failed:
-       trace_buffered_event_disable();
 }
 
 static void enable_trace_buffered_event(void *data)
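
The trace_buffered_event_enable() hunk turns a per-CPU page allocation failure from a hard rollback into a soft degradation: the buffered event is only a fast-path optimization, so on CPUs that did not get a page, events simply go through the normal ring-buffer path. A rough sketch of that degrade-gracefully pattern (hypothetical names, not the kernel implementation):

    #include <stdio.h>
    #include <stdlib.h>

    #define NCPUS 4

    static void *fast_event[NCPUS]; /* optional per-CPU fast-path buffers */

    /* Try to set up the optimization; failures are tolerated. */
    static void buffered_event_enable(void)
    {
            for (int cpu = 0; cpu < NCPUS; cpu++) {
                    fast_event[cpu] = malloc(4096);
                    if (!fast_event[cpu]) {
                            /* Just an optimization: log it and keep going. */
                            fprintf(stderr, "no fast buffer for cpu %d\n", cpu);
                            break;
                    }
            }
    }

    /* Writers fall back to the slow path when the fast buffer is missing. */
    static void record_event(int cpu, const char *msg)
    {
            if (fast_event[cpu])
                    printf("cpu%d fast path: %s\n", cpu, msg);
            else
                    printf("cpu%d slow path: %s\n", cpu, msg);
    }

    int main(void)
    {
            buffered_event_enable();
            record_event(0, "hello");
            return 0;
    }
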
@@ -4986,6 +4943,20 @@ int tracing_open_file_tr(struct inode *inode, struct file *filp)
        if (ret)
                return ret;
 
+       mutex_lock(&event_mutex);
+
+       /* Fail if the file is marked for removal */
+       if (file->flags & EVENT_FILE_FL_FREED) {
+               trace_array_put(file->tr);
+               ret = -ENODEV;
+       } else {
+               event_file_get(file);
+       }
+
+       mutex_unlock(&event_mutex);
+       if (ret)
+               return ret;
+
        filp->private_data = inode->i_private;
 
        return 0;
@@ -4996,6 +4967,7 @@ int tracing_release_file_tr(struct inode *inode, struct file *filp)
        struct trace_event_file *file = inode->i_private;
 
        trace_array_put(file->tr);
+       event_file_put(file);
 
        return 0;
 }
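
The open/release hunks pair event_file_get() with event_file_put() under event_mutex, so a tracefs open holds a reference to the trace_event_file for the lifetime of the file descriptor, and an open that races with removal fails cleanly: if the file is already marked EVENT_FILE_FL_FREED, the open returns -ENODEV instead of taking a reference to soon-to-be-freed memory. A minimal sketch of that check-flag-then-take-reference pattern under a lock (user-space C with hypothetical names; the kernel side uses event_mutex and event_file_get()/event_file_put()):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    struct event_file_sketch {
            pthread_mutex_t lock;   /* stands in for event_mutex */
            int refcount;
            int freed;              /* stands in for EVENT_FILE_FL_FREED */
    };

    /* Open: only take a reference if the object is not marked for removal. */
    static int sketch_open(struct event_file_sketch *f)
    {
            int ret = 0;

            pthread_mutex_lock(&f->lock);
            if (f->freed)
                    ret = -ENODEV;  /* lost the race against removal */
            else
                    f->refcount++;  /* hold the file across the open fd */
            pthread_mutex_unlock(&f->lock);
            return ret;
    }

    /* Release: drop the reference; the last put would free the object. */
    static void sketch_release(struct event_file_sketch *f)
    {
            pthread_mutex_lock(&f->lock);
            if (--f->refcount == 0 && f->freed)
                    printf("last reference gone, safe to free\n");
            pthread_mutex_unlock(&f->lock);
    }

    int main(void)
    {
            static struct event_file_sketch f = {
                    .lock = PTHREAD_MUTEX_INITIALIZER, .refcount = 1, .freed = 0,
            };

            if (sketch_open(&f) == 0)
                    sketch_release(&f);
            return 0;
    }
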
@@ -6380,13 +6352,15 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
        if (!tr->array_buffer.buffer)
                return 0;
 
+       /* Do not allow tracing while resizing ring buffer */
+       tracing_stop_tr(tr);
+
        ret = ring_buffer_resize(tr->array_buffer.buffer, size, cpu);
        if (ret < 0)
-               return ret;
+               goto out_start;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
-       if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
-           !tr->current_trace->use_max_tr)
+       if (!tr->current_trace->use_max_tr)
                goto out;
 
        ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
@@ -6411,7 +6385,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
                        WARN_ON(1);
                        tracing_disabled = 1;
                }
-               return ret;
+               goto out_start;
        }
 
        update_buffer_entries(&tr->max_buffer, cpu);
@@ -6420,7 +6394,8 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 #endif /* CONFIG_TRACER_MAX_TRACE */
 
        update_buffer_entries(&tr->array_buffer, cpu);
-
+ out_start:
+       tracing_start_tr(tr);
        return ret;
 }
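
With the resize hunks above, __tracing_resize_ring_buffer() now quiesces the whole instance before touching either buffer and always restarts it on the way out, including on error, by funneling the failure paths through the out_start label. A condensed, self-contained sketch of the resulting stop/resize/start flow (stand-in functions, error handling abbreviated, not the full kernel function):

    #include <stdio.h>

    /* Stand-ins for tracing_stop_tr(), tracing_start_tr(), ring_buffer_resize(). */
    static void quiesce(void)  { printf("recording stopped\n"); }
    static void resume(void)   { printf("recording restarted\n"); }
    static int  resize(int ok) { return ok ? 0 : -12; /* -ENOMEM */ }

    /*
     * Mirrors the flow after this change: stop the instance, resize the main
     * buffer (and the max/snapshot buffer when the tracer uses it), then
     * restart on every exit path via out_start.
     */
    static int resize_sketch(int main_ok, int snap_ok)
    {
            int ret;

            quiesce();                      /* tracing_stop_tr(tr) */

            ret = resize(main_ok);          /* main array_buffer */
            if (ret < 0)
                    goto out_start;

            ret = resize(snap_ok);          /* max/snapshot buffer, if in use */
     out_start:
            resume();                       /* tracing_start_tr(tr) */
            return ret;
    }

    int main(void)
    {
            printf("ret = %d\n", resize_sketch(1, 1));
            return 0;
    }
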