ring_buffer: Change some static functions to void
author Uros Bizjak <ubizjak@gmail.com>
Sun, 5 Mar 2023 15:55:30 +0000 (16:55 +0100)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
Tue, 21 Mar 2023 17:59:31 +0000 (13:59 -0400)
The results of some static functions are never used by their callers.
Change the type of these functions to void and remove the unnecessary
returns.

No functional change intended.
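
In each case the pattern is the same: either a helper computes an int
that no caller ever tests, or a void function ends with a bare return.
As a minimal, self-contained illustration of the conversion (hypothetical
names and plain C, not code from this patch):

#include <stdio.h>

struct page_state { unsigned long flags; };

/* Before: an int result that no caller ever tested. */
static int check_flags_old(const struct page_state *p)
{
	if (p->flags & 0x3UL) {
		fprintf(stderr, "corrupted flags\n");
		return 1;
	}
	return 0;
}

/* After: void documents that the helper is called purely for its side
 * effect (the warning) and removes the dead return statements. */
static void check_flags(const struct page_state *p)
{
	if (p->flags & 0x3UL)
		fprintf(stderr, "corrupted flags\n");
}

int main(void)
{
	struct page_state p = { .flags = 0x4UL };
	check_flags_old(&p);	/* result discarded, as in ring_buffer.c */
	check_flags(&p);
	return 0;
}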

Link: https://lkml.kernel.org/r/20230305155532.5549-2-ubizjak@gmail.com
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Mukesh Ojha <quic_mojha@quicinc.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
kernel/trace/ring_buffer.c

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index c6f47b6..b45915d 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1565,15 +1565,12 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
        }
 }
 
-static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
+static void rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
                          struct buffer_page *bpage)
 {
        unsigned long val = (unsigned long)bpage;
 
-       if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
-               return 1;
-
-       return 0;
+       RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK);
 }
 
 /**
@@ -1583,30 +1580,28 @@ static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
  * As a safety measure we check to make sure the data pages have not
  * been corrupted.
  */
-static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
        struct list_head *head = rb_list_head(cpu_buffer->pages);
        struct list_head *tmp;
 
        if (RB_WARN_ON(cpu_buffer,
                        rb_list_head(rb_list_head(head->next)->prev) != head))
-               return -1;
+               return;
 
        if (RB_WARN_ON(cpu_buffer,
                        rb_list_head(rb_list_head(head->prev)->next) != head))
-               return -1;
+               return;
 
        for (tmp = rb_list_head(head->next); tmp != head; tmp = rb_list_head(tmp->next)) {
                if (RB_WARN_ON(cpu_buffer,
                                rb_list_head(rb_list_head(tmp->next)->prev) != tmp))
-                       return -1;
+                       return;
 
                if (RB_WARN_ON(cpu_buffer,
                                rb_list_head(rb_list_head(tmp->prev)->next) != tmp))
-                       return -1;
+                       return;
        }
-
-       return 0;
 }
 
 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
@@ -4496,7 +4491,6 @@ rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
        default:
                RB_WARN_ON(cpu_buffer, 1);
        }
-       return;
 }
 
 static void
@@ -4527,7 +4521,6 @@ rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
        default:
                RB_WARN_ON(iter->cpu_buffer, 1);
        }
-       return;
 }
 
 static struct buffer_page *
@@ -4942,7 +4935,6 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
 {
        if (likely(locked))
                raw_spin_unlock(&cpu_buffer->reader_lock);
-       return;
 }
 
 /**
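
A note on the RB_WARN_ON() conversion in rb_check_bpage() above: in
ring_buffer.c the macro is a statement expression that evaluates to the
condition it warns about (and disables the ring buffer on failure), so
it works both inside an if, as in the old code, and as a bare statement
whose value is discarded, as in the new code. A runnable sketch of that
dual use with a hypothetical stand-in macro (it only prints a warning;
it does not model the ring-buffer disabling):

#include <stdio.h>

/* SKETCH_WARN_ON is a hypothetical stand-in for RB_WARN_ON: a GNU C
 * statement expression that warns when the condition holds and
 * evaluates to that condition. */
#define SKETCH_WARN_ON(cond)					\
({								\
	int __c = !!(cond);					\
	if (__c)						\
		fprintf(stderr, "warning: %s\n", #cond);	\
	__c;							\
})

/* New style: used as a bare statement, the value is simply dropped. */
static void check(unsigned long val)
{
	SKETCH_WARN_ON(val & 0x3UL);
}

/* Old style: the same macro used as the tested expression of an if. */
static int check_old(unsigned long val)
{
	if (SKETCH_WARN_ON(val & 0x3UL))
		return 1;
	return 0;
}

int main(void)
{
	check(0x5UL);		/* triggers the warning path */
	check_old(0x4UL);	/* clean; result ignored by callers anyway */
	return 0;
}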