Merge tag 'trace-v6.4' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux...
[platform/kernel/linux-starfive.git] kernel/trace/ring_buffer.c
index 76a2d91..834b361 100644
@@ -163,7 +163,7 @@ enum {
 #define extended_time(event) \
        (event->type_len >= RINGBUF_TYPE_TIME_EXTEND)
 
-static inline int rb_null_event(struct ring_buffer_event *event)
+static inline bool rb_null_event(struct ring_buffer_event *event)
 {
        return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
 }
@@ -363,11 +363,9 @@ static void free_buffer_page(struct buffer_page *bpage)
 /*
  * We need to fit the time_stamp delta into 27 bits.
  */
-static inline int test_time_stamp(u64 delta)
+static inline bool test_time_stamp(u64 delta)
 {
-       if (delta & TS_DELTA_TEST)
-               return 1;
-       return 0;
+       return !!(delta & TS_DELTA_TEST);
 }
 
 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
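
Several of the int-to-bool conversions in this patch, like test_time_stamp() above, use the !! idiom to normalise a bit-test result to exactly 0 or 1. A tiny stand-alone sketch of that idiom, compilable as plain userspace C; the mask below is only an illustrative stand-in for the kernel's TS_DELTA_TEST (the bits that do not fit in a 27-bit delta):

/* Userspace sketch of the !! idiom used by test_time_stamp(). */
#include <stdbool.h>
#include <stdio.h>

#define DELTA_TEST_MASK (~0ULL << 27)   /* illustrative stand-in for TS_DELTA_TEST */

static inline bool delta_exceeds_27_bits(unsigned long long delta)
{
        return !!(delta & DELTA_TEST_MASK);     /* 0 stays 0, anything else becomes 1 */
}

int main(void)
{
        printf("%d %d\n", delta_exceeds_27_bits(1ULL << 40),   /* 1 */
                          delta_exceeds_27_bits(5));           /* 0 */
        return 0;
}
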
@@ -696,7 +694,7 @@ rb_time_read_cmpxchg(local_t *l, unsigned long expect, unsigned long set)
        return ret == expect;
 }
 
-static int rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
+static bool rb_time_cmpxchg(rb_time_t *t, u64 expect, u64 set)
 {
        unsigned long cnt, top, bottom, msb;
        unsigned long cnt2, top2, bottom2, msb2;
@@ -1486,7 +1484,7 @@ rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
        return NULL;
 }
 
-static int rb_head_page_replace(struct buffer_page *old,
+static bool rb_head_page_replace(struct buffer_page *old,
                                struct buffer_page *new)
 {
        unsigned long *ptr = (unsigned long *)&old->list.prev->next;
@@ -1565,15 +1563,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
        }
 }
 
-static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
                          struct buffer_page *bpage)
 {
        unsigned long val = (unsigned long)bpage;
 
-       if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
-               return 1;
-
-       return 0;
+       RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
 }
 
 /**
@@ -1583,30 +1578,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
  * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
-static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct list_head *head = rb_list_head(cpu_buffer->pages);
        struct list_head *tmp;
 
        if (RB_WARN_ON(cpu_buffer,
                        rb_list_head(rb_list_head(head->next)->prev) != head))
-               return -1;
+               return;
 
        if (RB_WARN_ON(cpu_buffer,
                        rb_list_head(rb_list_head(head->prev)->next) != head))
-               return -1;
+               return;
 
        for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
                if (RB_WARN_ON(cpu_buffer,
                                rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
-                       return -1;
+                       return;
 
                if (RB_WARN_ON(cpu_buffer,
                                rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
-                       return -1;
+                       return;
        }
-
-       return 0;
 }
 
 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1774,6 +1767,8 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
        struct list_head *head = cpu_buffer->pages;
        struct buffer_page *bpage, *tmp;
 
+       irq_work_sync(&cpu_buffer->irq_work.work);
+
        free_buffer_page(cpu_buffer->reader_page);
 
        if (head) {
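
The irq_work_sync() calls added here and in ring_buffer_free() below make sure a pending wakeup work item embedded in the buffer has finished running before the memory that contains it is freed. A rough userspace analogue of that use-after-free rule, using pthreads rather than the kernel irq_work API (all names below are illustrative):

/* Flush deferred work that references an object before freeing it. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_work {
        pthread_t thread;
        bool pending;
};

struct buffer {
        struct deferred_work work;      /* embedded, like cpu_buffer->irq_work */
        int data;
};

static void *work_fn(void *arg)
{
        struct buffer *buf = arg;       /* the handler touches the embedding object */
        printf("deferred wakeup for data=%d\n", buf->data);
        return NULL;
}

static void queue_work(struct buffer *buf)
{
        buf->work.pending = true;
        pthread_create(&buf->work.thread, NULL, work_fn, buf);
}

/* Analogue of irq_work_sync(): wait for any queued handler to finish. */
static void work_sync(struct deferred_work *w)
{
        if (w->pending) {
                pthread_join(w->thread, NULL);
                w->pending = false;
        }
}

static void buffer_free(struct buffer *buf)
{
        work_sync(&buf->work);  /* without this, work_fn() may run on freed memory */
        free(buf);
}

int main(void)
{
        struct buffer *buf = malloc(sizeof(*buf));

        buf->data = 42;
        buf->work.pending = false;
        queue_work(buf);
        buffer_free(buf);
        return 0;
}
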
@@ -1880,6 +1875,8 @@ ring_buffer_free(struct trace_buffer *buffer)
 
        cpuhp_state_remove_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
 
+       irq_work_sync(&buffer->irq_work.work);
+
        for_each_buffer_cpu(buffer, cpu)
                rb_free_cpu_buffer(buffer->buffers[cpu]);
 
@@ -1918,7 +1915,7 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
        return local_read(&bpage->write) & RB_WRITE_MASK;
 }
 
-static int
+static bool
 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
 {
        struct list_head *tail_page, *to_remove, *next_page;
@@ -2031,12 +2028,13 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
        return nr_removed == 0;
 }
 
-static int
+static bool
 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct list_head *pages = &cpu_buffer->new_pages;
-       int retries, success;
        unsigned long flags;
+       bool success;
+       int retries;
 
        /* Can be called at early boot up, where interrupts must not been enabled */
        raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
@@ -2055,15 +2053,16 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
         * spinning.
         */
        retries = 10;
-       success = 0;
+       success = false;
        while (retries--) {
                struct list_head *head_page, *prev_page, *r;
                struct list_head *last_page, *first_page;
                struct list_head *head_page_with_bit;
+               struct buffer_page *hpage = rb_set_head_page(cpu_buffer);
 
-               head_page = &rb_set_head_page(cpu_buffer)->list;
-               if (!head_page)
+               if (!hpage)
                        break;
+               head_page = &hpage->list;
                prev_page = head_page->prev;
 
                first_page = pages->next;
@@ -2084,7 +2083,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
                         * pointer to point to end of list
                         */
                        head_page->prev = last_page;
-                       success = 1;
+                       success = true;
                        break;
                }
        }
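
Besides the bool conversion, the rb_insert_pages() hunk above fixes an ordering problem: the old code formed &rb_set_head_page(cpu_buffer)->list first and only then checked the result for NULL, but taking the address of a member of a possibly-NULL pointer is already undefined behaviour, so the late check could not be relied on. The fix tests the returned pointer before touching its members. A small stand-alone illustration of the fixed shape, with hypothetical types and names:

/* Check the lookup result before taking the address of a member. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
struct page { int id; struct list_head list; };

static struct page *lookup_head_page(int present)
{
        static struct page head = { .id = 1 };
        return present ? &head : NULL;
}

int main(void)
{
        struct page *hpage = lookup_head_page(0);       /* may be NULL */
        struct list_head *head_list;

        if (!hpage) {                   /* test the pointer first ... */
                puts("no head page; bail out");
                return 0;
        }
        head_list = &hpage->list;       /* ... then touch its members */
        printf("head page %d at %p\n", hpage->id, (void *)head_list);
        return 0;
}
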
@@ -2112,7 +2111,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       int success;
+       bool success;
 
        if (cpu_buffer->nr_pages_to_update > 0)
                success = rb_insert_pages(cpu_buffer);
@@ -2995,7 +2994,7 @@ static u64 rb_time_delta(struct ring_buffer_event *event)
        }
 }
 
-static inline int
+static inline bool
 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
                  struct ring_buffer_event *event)
 {
@@ -3016,7 +3015,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
        delta = rb_time_delta(event);
 
        if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp))
-               return 0;
+               return false;
 
        /* Make sure the write stamp is read before testing the location */
        barrier();
@@ -3029,7 +3028,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
                /* Something came in, can't discard */
                if (!rb_time_cmpxchg(&cpu_buffer->write_stamp,
                                       write_stamp, write_stamp - delta))
-                       return 0;
+                       return false;
 
                /*
                 * It's possible that the event time delta is zero
@@ -3062,12 +3061,12 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
                if (index == old_index) {
                        /* update counters */
                        local_sub(event_length, &cpu_buffer->entries_bytes);
-                       return 1;
+                       return true;
                }
        }
 
        /* could not discard */
-       return 0;
+       return false;
 }
 
 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
@@ -3288,7 +3287,7 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
  * Note: The TRANSITION bit only handles a single transition between context.
  */
 
-static __always_inline int
+static __always_inline bool
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
        unsigned int val = cpu_buffer->current_context;
@@ -3305,14 +3304,14 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
                bit = RB_CTX_TRANSITION;
                if (val & (1 << (bit + cpu_buffer->nest))) {
                        do_ring_buffer_record_recursion();
-                       return 1;
+                       return true;
                }
        }
 
        val |= (1 << (bit + cpu_buffer->nest));
        cpu_buffer->current_context = val;
 
-       return 0;
+       return false;
 }
 
 static __always_inline void
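
trace_recursive_lock() above now reports recursion as a bool. The guard itself keeps one bit per trace context in cpu_buffer->current_context: if the bit for the current context is already set, the new event is recursive and gets rejected. A simplified, single-threaded userspace sketch of that bitmask scheme (the context names are illustrative and the kernel's nesting and TRANSITION handling is omitted):

/* One recursion-guard bit per context level. */
#include <stdbool.h>
#include <stdio.h>

enum ctx { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL, CTX_MAX };

static unsigned int current_context;

static bool recursive_lock(enum ctx bit)
{
        if (current_context & (1u << bit))
                return true;            /* already inside this context: recursion */
        current_context |= 1u << bit;
        return false;
}

static void recursive_unlock(enum ctx bit)
{
        current_context &= ~(1u << bit);
}

int main(void)
{
        if (!recursive_lock(CTX_NORMAL)) {
                /* a nested attempt from the same context is rejected */
                printf("nested attempt recursing? %s\n",
                       recursive_lock(CTX_NORMAL) ? "yes" : "no");
                recursive_unlock(CTX_NORMAL);
        }
        return 0;
}
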
@@ -4069,10 +4068,10 @@ void ring_buffer_record_off(struct trace_buffer *buffer)
        unsigned int rd;
        unsigned int new_rd;
 
+       rd = atomic_read(&buffer->record_disabled);
        do {
-               rd = atomic_read(&buffer->record_disabled);
                new_rd = rd | RB_BUFFER_OFF;
-       } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+       } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
 
@@ -4092,10 +4091,10 @@ void ring_buffer_record_on(struct trace_buffer *buffer)
        unsigned int rd;
        unsigned int new_rd;
 
+       rd = atomic_read(&buffer->record_disabled);
        do {
-               rd = atomic_read(&buffer->record_disabled);
                new_rd = rd & ~RB_BUFFER_OFF;
-       } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
+       } while (!atomic_try_cmpxchg(&buffer->record_disabled, &rd, new_rd));
 }
 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
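
The two hunks above replace open-coded atomic_cmpxchg() retry loops with atomic_try_cmpxchg(): on failure, try_cmpxchg writes the value it actually observed back into rd, so the loop no longer needs to re-read record_disabled at the top of every iteration. A userspace C11 analogue using stdatomic rather than the kernel atomic API (the flag value below is only an assumption for illustration):

/* try_cmpxchg-style retry loop: the expected value is refreshed on failure. */
#include <stdatomic.h>
#include <stdio.h>

#define RB_BUFFER_OFF (1u << 20)        /* illustrative flag value */

static _Atomic unsigned int record_disabled;

static void record_off(void)
{
        unsigned int rd = atomic_load(&record_disabled);
        unsigned int new_rd;

        do {
                new_rd = rd | RB_BUFFER_OFF;
                /* on failure, rd is updated with the observed value */
        } while (!atomic_compare_exchange_weak(&record_disabled, &rd, new_rd));
}

int main(void)
{
        record_off();
        printf("record_disabled = %#x\n", atomic_load(&record_disabled));
        return 0;
}
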
 
@@ -4502,7 +4501,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
        default:
                RB_WARN_ON(cpu_buffer, 1);
        }
-       return;
 }
 
 static void
@@ -4533,7 +4531,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
        default:
                RB_WARN_ON(iter->cpu_buffer, 1);
        }
-       return;
 }
 
 static struct buffer_page *
@@ -4543,7 +4540,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
        unsigned long overwrite;
        unsigned long flags;
        int nr_loops = 0;
-       int ret;
+       bool ret;
 
        local_irq_save(flags);
        arch_spin_lock(&cpu_buffer->lock);
@@ -4953,7 +4950,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
 {
        if (likely(locked))
                raw_spin_unlock(&cpu_buffer->reader_lock);
-       return;
 }
 
 /**
@@ -5345,6 +5341,9 @@ void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
+/* Flag to ensure proper resetting of atomic variables */
+#define RESET_BIT      (1 << 30)
+
 /**
  * ring_buffer_reset_online_cpus - reset a ring buffer per CPU buffer
  * @buffer: The ring buffer to reset a per cpu buffer of
@@ -5361,20 +5360,27 @@ void ring_buffer_reset_online_cpus(struct trace_buffer *buffer)
        for_each_online_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
 
-               atomic_inc(&cpu_buffer->resize_disabled);
+               atomic_add(RESET_BIT, &cpu_buffer->resize_disabled);
                atomic_inc(&cpu_buffer->record_disabled);
        }
 
        /* Make sure all commits have finished */
        synchronize_rcu();
 
-       for_each_online_buffer_cpu(buffer, cpu) {
+       for_each_buffer_cpu(buffer, cpu) {
                cpu_buffer = buffer->buffers[cpu];
 
+               /*
+                * If a CPU came online during the synchronize_rcu(), then
+                * ignore it.
+                */
+               if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT))
+                       continue;
+
                reset_disabled_cpu_buffer(cpu_buffer);
 
                atomic_dec(&cpu_buffer->record_disabled);
-               atomic_dec(&cpu_buffer->resize_disabled);
+               atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled);
        }
 
        mutex_unlock(&buffer->mutex);
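
The reset path above now tags every per-CPU buffer that existed before the synchronize_rcu() by adding RESET_BIT to its resize_disabled counter, then walks all buffers and skips any that does not carry the tag, i.e. a CPU whose buffer came online in between and was never disabled here. A compact userspace sketch of that tag-and-skip idea using C11 atomics (the array and the two explicit phases are illustrative, not the kernel code):

/* Tag buffers before the grace period; only reset the tagged ones after it. */
#include <stdatomic.h>
#include <stdio.h>

#define RESET_BIT (1 << 30)
#define NR_CPUS 4

static _Atomic int resize_disabled[NR_CPUS];

int main(void)
{
        /* Phase 1: tag the buffers that exist now (CPUs 0 and 1). */
        atomic_fetch_add(&resize_disabled[0], RESET_BIT);
        atomic_fetch_add(&resize_disabled[1], RESET_BIT);

        /* CPU 2 "comes online" between the phases (during synchronize_rcu())
         * and takes an ordinary resize_disabled reference of its own. */
        atomic_fetch_add(&resize_disabled[2], 1);

        /* Phase 2: reset only the buffers tagged in phase 1. */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!(atomic_load(&resize_disabled[cpu]) & RESET_BIT))
                        continue;                       /* skip the late arrival */
                printf("resetting cpu %d\n", cpu);
                atomic_fetch_sub(&resize_disabled[cpu], RESET_BIT);
        }
        return 0;
}
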
@@ -5424,8 +5430,8 @@ bool ring_buffer_empty(struct trace_buffer *buffer)
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
        bool dolock;
+       bool ret;
        int cpu;
-       int ret;
 
        /* yes this is racy, but if you don't like the race, lock the buffer */
        for_each_buffer_cpu(buffer, cpu) {
@@ -5454,7 +5460,7 @@ bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
        struct ring_buffer_per_cpu *cpu_buffer;
        unsigned long flags;
        bool dolock;
-       int ret;
+       bool ret;
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
                return true;