Merge tag 'trace-v6.2-rc7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace...
authorLinus Torvalds <torvalds@linux-foundation.org>
Thu, 23 Feb 2023 18:08:01 +0000 (10:08 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 23 Feb 2023 18:08:01 +0000 (10:08 -0800)
Pull tracing fix from Steven Rostedt:
 "Fix race that causes a warning of corrupt ring buffer

  With the change that allows reading the "trace" file without disabling
  writing to the ring buffer, there was an integrity check of the ring
  buffer in the iterator read code, that expected the ring buffer to be
  write disabled. This caused the integrity check to trigger when stress
  reading the "trace" file while writing was happening.

  The integrity check is a bit aggressive (and has never triggered in
  practice). Change it so that it checks just the integrity of the
  linked pages without clearing the flags inside the pointers. This
  removes the warning that was being triggered"

[ Heh. This was supposed to have gone in last week before the 6.2
  release, but Steven forgot to actually add me to the participants of
  the pull request, so here it is, a week later   - Linus ]

* tag 'trace-v6.2-rc7-3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  ring-buffer: Handle race between rb_move_tail and rb_check_pages

kernel/trace/ring_buffer.c

index c366a0a..b641cab 100644 (file)
@@ -1581,19 +1581,6 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
 }
 
 /**
- * rb_check_list - make sure a pointer to a list has the last bits zero
- */
-static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
-                        struct list_head *list)
-{
-       if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
-               return 1;
-       if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
-               return 1;
-       return 0;
-}
-
-/**
  * rb_check_pages - integrity check of buffer pages
  * @cpu_buffer: CPU buffer with pages to test
  *
@@ -1602,36 +1589,27 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
  */
 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
-       struct list_head *head = cpu_buffer->pages;
-       struct buffer_page *bpage, *tmp;
+       struct list_head *head = rb_list_head(cpu_buffer->pages);
+       struct list_head *tmp;
 
-       /* Reset the head page if it exists */
-       if (cpu_buffer->head_page)
-               rb_set_head_page(cpu_buffer);
-
-       rb_head_page_deactivate(cpu_buffer);
-
-       if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
-               return -1;
-       if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
+       if (RB_WARN_ON(cpu_buffer,
+                       rb_list_head(rb_list_head(head->next)->prev) != head))
                return -1;
 
-       if (rb_check_list(cpu_buffer, head))
+       if (RB_WARN_ON(cpu_buffer,
+                       rb_list_head(rb_list_head(head->prev)->next) != head))
                return -1;
 
-       list_for_each_entry_safe(bpage, tmp, head, list) {
+       for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
                if (RB_WARN_ON(cpu_buffer,
-                              bpage->list.next->prev != &bpage->list))
+                               rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
                        return -1;
+
                if (RB_WARN_ON(cpu_buffer,
-                              bpage->list.prev->next != &bpage->list))
-                       return -1;
-               if (rb_check_list(cpu_buffer, &bpage->list))
+                               rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
                        return -1;
        }
 
-       rb_head_page_activate(cpu_buffer);
-
        return 0;
 }