ring-buffer: Fix wrong stat of cpu_buffer->read
Author:     Zheng Yejian <zhengyejian1@huawei.com>
AuthorDate: Mon, 24 Jul 2023 05:40:40 +0000 (13:40 +0800)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 3 Aug 2023 08:24:07 +0000 (10:24 +0200)
[ Upstream commit 2d093282b0d4357373497f65db6a05eb0c28b7c8 ]

When pages are removed in rb_remove_pages(), 'cpu_buffer->read' is set
to 0 in order to make sure any read iterators reset themselves. However,
this messes up the 'entries' stat, as the following steps show:

  # cd /sys/kernel/tracing/
  # 1. Enlarge ring buffer prepare for later reducing:
  # echo 20 > per_cpu/cpu0/buffer_size_kb
  # 2. Write a log into ring buffer of cpu0:
  # taskset -c 0 echo "hello1" > trace_marker
  # 3. Read the log:
  # cat per_cpu/cpu0/trace_pipe
       <...>-332     [000] .....    62.406844: tracing_mark_write: hello1
  # 4. Stop reading and check the stats: now 0 entries, and 1 event read:
  # cat per_cpu/cpu0/stats
   entries: 0
   [...]
   read events: 1
  # 5. Reduce the ring buffer
  # echo 7 > per_cpu/cpu0/buffer_size_kb
  # 6. Now 'entries' unexpectedly became 1 even though there are actually no entries!!!
  # cat per_cpu/cpu0/stats
   entries: 1
   [...]
   read events: 0
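
The inflated count falls out of how the per-cpu 'entries' stat is
derived: events written, minus events overwritten or already consumed
(see rb_num_of_entries() in kernel/trace/ring_buffer.c). A minimal
standalone sketch of that arithmetic, with illustrative names rather
than the kernel's actual fields, shows why zeroing 'read' corrupts the
stat:

  #include <stdio.h>

  /* Illustrative model of the stat in rb_num_of_entries():
   * written events minus those overwritten or already consumed. */
  static unsigned long num_entries(unsigned long written,
                                   unsigned long overrun,
                                   unsigned long read)
  {
          return written - (overrun + read);
  }

  int main(void)
  {
          unsigned long written = 1, overrun = 0, read = 1;

          /* Step 4: one event written, one consumed -> stat is 0. */
          printf("entries: %lu\n", num_entries(written, overrun, read));

          /* Step 5: rb_remove_pages() zeroed 'read' -> stat becomes 1,
           * even though nothing is left to read. */
          read = 0;
          printf("entries: %lu\n", num_entries(written, overrun, read));
          return 0;
  }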

To fix it, introduce a 'pages_removed' field to count the total pages
removed since the last reset, then use it to let read iterators reset
themselves instead of changing the 'read' pointer.
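
The fix follows a generation-counter pattern: the iterator snapshots a
counter when it is (re)initialized, and any later mismatch means the
buffer changed underneath it and it must reset. A minimal userspace
sketch of the idea, with illustrative names rather than the kernel's
actual structures:

  #include <stdio.h>

  /* Hypothetical types for illustration only. */
  struct buffer {
          unsigned long read;          /* events consumed so far */
          unsigned long pages_removed; /* bumped whenever pages are removed */
  };

  struct iter {
          unsigned long cache_read;
          unsigned long cache_pages_removed;
  };

  static void iter_reset(struct iter *it, const struct buffer *b)
  {
          /* Re-snapshot the buffer state the iterator depends on. */
          it->cache_read = b->read;
          it->cache_pages_removed = b->pages_removed;
  }

  static int iter_stale(const struct iter *it, const struct buffer *b)
  {
          /* A consuming read or a page removal invalidates the iterator.
           * A dedicated counter avoids overloading 'read', which the
           * stats arithmetic relies on. */
          return it->cache_read != b->read ||
                 it->cache_pages_removed != b->pages_removed;
  }

  int main(void)
  {
          struct buffer b = { .read = 1, .pages_removed = 0 };
          struct iter it;

          iter_reset(&it, &b);
          b.pages_removed++;           /* buffer shrunk under the iterator */
          if (iter_stale(&it, &b))
                  iter_reset(&it, &b); /* iterator resets; 'read' untouched */
          printf("read still %lu\n", b.read);
          return 0;
  }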

Link: https://lore.kernel.org/linux-trace-kernel/20230724054040.3489499-1-zhengyejian1@huawei.com
Cc: <mhiramat@kernel.org>
Cc: <vnagarnaik@google.com>
Fixes: 83f40318dab0 ("ring-buffer: Make removal of ring buffer pages atomic")
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/trace/ring_buffer.c

index c264421..c49ed61 100644
@@ -529,6 +529,8 @@ struct ring_buffer_per_cpu {
        rb_time_t                       before_stamp;
        u64                             event_stamp[MAX_NEST];
        u64                             read_stamp;
+       /* pages removed since last reset */
+       unsigned long                   pages_removed;
        /* ring buffer pages to update, > 0 to add, < 0 to remove */
        long                            nr_pages_to_update;
        struct list_head                new_pages; /* new pages to add */
@@ -564,6 +566,7 @@ struct ring_buffer_iter {
        struct buffer_page              *head_page;
        struct buffer_page              *cache_reader_page;
        unsigned long                   cache_read;
+       unsigned long                   cache_pages_removed;
        u64                             read_stamp;
        u64                             page_stamp;
        struct ring_buffer_event        *event;
@@ -1967,6 +1970,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
                to_remove = rb_list_head(to_remove)->next;
                head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
        }
+       /* Read iterators need to reset themselves when some pages removed */
+       cpu_buffer->pages_removed += nr_removed;
 
        next_page = rb_list_head(to_remove)->next;
 
@@ -1988,12 +1993,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
                cpu_buffer->head_page = list_entry(next_page,
                                                struct buffer_page, list);
 
-       /*
-        * change read pointer to make sure any read iterators reset
-        * themselves
-        */
-       cpu_buffer->read = 0;
-
        /* pages are removed, resume tracing and then free the pages */
        atomic_dec(&cpu_buffer->record_disabled);
        raw_spin_unlock_irq(&cpu_buffer->reader_lock);
@@ -4385,6 +4384,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 
        iter->cache_reader_page = iter->head_page;
        iter->cache_read = cpu_buffer->read;
+       iter->cache_pages_removed = cpu_buffer->pages_removed;
 
        if (iter->head) {
                iter->read_stamp = cpu_buffer->read_stamp;
@@ -4841,12 +4841,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        buffer = cpu_buffer->buffer;
 
        /*
-        * Check if someone performed a consuming read to
-        * the buffer. A consuming read invalidates the iterator
-        * and we need to reset the iterator in this case.
+        * Check if someone performed a consuming read to the buffer
+        * or removed some pages from the buffer. In these cases,
+        * iterator was invalidated and we need to reset it.
         */
        if (unlikely(iter->cache_read != cpu_buffer->read ||
-                    iter->cache_reader_page != cpu_buffer->reader_page))
+                    iter->cache_reader_page != cpu_buffer->reader_page ||
+                    iter->cache_pages_removed != cpu_buffer->pages_removed))
                rb_iter_reset(iter);
 
  again:
@@ -5291,6 +5292,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
        cpu_buffer->last_overrun = 0;
 
        rb_head_page_activate(cpu_buffer);
+       cpu_buffer->pages_removed = 0;
 }
 
 /* Must have disabled the cpu buffer then done a synchronize_rcu */