rcu: Add TPS() protection for _rcu_barrier_trace strings
author Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 27 Jun 2017 20:22:17 +0000 (13:22 -0700)
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 17 Aug 2017 14:26:22 +0000 (07:26 -0700)
The _rcu_barrier_trace() function is a wrapper for trace_rcu_barrier(),
which needs TPS() protection for strings passed through the second
argument.  However, it has escaped prior TPS()-ification efforts because
_rcu_barrier_trace() does not start with "trace_".  This commit
therefore adds the needed TPS() protection.
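
For context, _rcu_barrier_trace() is a thin wrapper in kernel/rcu/tree.c
and TPS() is the RCU code's shorthand for tracepoint_string().  A condensed
sketch of the mechanism (based on include/linux/tracepoint.h of this era;
details vary across kernel versions):

	/* Sketch only: with CONFIG_TRACING, tracepoint_string() places
	 * the literal in the __tracepoint_str section so that user-space
	 * tools can map the recorded pointer back to its text (e.g. via
	 * the tracing printk_formats file). */
	#ifdef CONFIG_TRACING
	#define __tracepoint_string \
		__attribute__((section("__tracepoint_str")))
	#define tracepoint_string(str)					\
	({								\
		static const char *___tp_str __tracepoint_string = str; \
		___tp_str;						\
	})
	#else
	/* Without tracing there is nothing to resolve; pass through. */
	#define tracepoint_string(str) str
	#endif

	/* Shorthand used by the calls patched below. */
	#define TPS(x) tracepoint_string(x)

	/* The wrapper that hid these strings from earlier TPS() sweeps:
	 * it simply forwards its string argument to trace_rcu_barrier(). */
	static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
				       int cpu, unsigned long done)
	{
		trace_rcu_barrier(rsp->name, s, cpu,
				  atomic_read(&rsp->barrier_cpu_count), done);
	}

Because the wrapper's name lacks the "trace_" prefix, a search for
tracepoint call sites misses it, which is why these strings went
unprotected until now.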

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2b13d96..c1442be 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3568,10 +3568,11 @@ static void rcu_barrier_callback(struct rcu_head *rhp)
        struct rcu_state *rsp = rdp->rsp;
 
        if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
-               _rcu_barrier_trace(rsp, "LastCB", -1, rsp->barrier_sequence);
+               _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
+                                  rsp->barrier_sequence);
                complete(&rsp->barrier_completion);
        } else {
-               _rcu_barrier_trace(rsp, "CB", -1, rsp->barrier_sequence);
+               _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
        }
 }
 
@@ -3583,14 +3584,15 @@ static void rcu_barrier_func(void *type)
        struct rcu_state *rsp = type;
        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
-       _rcu_barrier_trace(rsp, "IRQ", -1, rsp->barrier_sequence);
+       _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
        rdp->barrier_head.func = rcu_barrier_callback;
        debug_rcu_head_queue(&rdp->barrier_head);
        if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
                atomic_inc(&rsp->barrier_cpu_count);
        } else {
                debug_rcu_head_unqueue(&rdp->barrier_head);
-               _rcu_barrier_trace(rsp, "IRQNQ", -1, rsp->barrier_sequence);
+               _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
+                                  rsp->barrier_sequence);
        }
 }
 
@@ -3604,14 +3606,15 @@ static void _rcu_barrier(struct rcu_state *rsp)
        struct rcu_data *rdp;
        unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
 
-       _rcu_barrier_trace(rsp, "Begin", -1, s);
+       _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
 
        /* Take mutex to serialize concurrent rcu_barrier() requests. */
        mutex_lock(&rsp->barrier_mutex);
 
        /* Did someone else do our work for us? */
        if (rcu_seq_done(&rsp->barrier_sequence, s)) {
-               _rcu_barrier_trace(rsp, "EarlyExit", -1, rsp->barrier_sequence);
+               _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
+                                  rsp->barrier_sequence);
                smp_mb(); /* caller's subsequent code after above check. */
                mutex_unlock(&rsp->barrier_mutex);
                return;
@@ -3619,7 +3622,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /* Mark the start of the barrier operation. */
        rcu_seq_start(&rsp->barrier_sequence);
-       _rcu_barrier_trace(rsp, "Inc1", -1, rsp->barrier_sequence);
+       _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
 
        /*
         * Initialize the count to one rather than to zero in order to
@@ -3642,10 +3645,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
                rdp = per_cpu_ptr(rsp->rda, cpu);
                if (rcu_is_nocb_cpu(cpu)) {
                        if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
-                               _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
+                               _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
                                                   rsp->barrier_sequence);
                        } else {
-                               _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
+                               _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
                                                   rsp->barrier_sequence);
                                smp_mb__before_atomic();
                                atomic_inc(&rsp->barrier_cpu_count);
@@ -3653,11 +3656,11 @@ static void _rcu_barrier(struct rcu_state *rsp)
                                           rcu_barrier_callback, rsp, cpu, 0);
                        }
                } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
-                       _rcu_barrier_trace(rsp, "OnlineQ", cpu,
+                       _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
                                           rsp->barrier_sequence);
                        smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
                } else {
-                       _rcu_barrier_trace(rsp, "OnlineNQ", cpu,
+                       _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
                                           rsp->barrier_sequence);
                }
        }
@@ -3674,7 +3677,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
        wait_for_completion(&rsp->barrier_completion);
 
        /* Mark the end of the barrier operation. */
-       _rcu_barrier_trace(rsp, "Inc2", -1, rsp->barrier_sequence);
+       _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
        rcu_seq_end(&rsp->barrier_sequence);
 
        /* Other rcu_barrier() invocations can now safely proceed. */