rcu: Mark rcu_state.gp_seq to detect more concurrent writes
author: Paul E. McKenney <paulmck@kernel.org>
Sun, 22 Mar 2020 02:52:20 +0000 (19:52 -0700)
committer: Paul E. McKenney <paulmck@kernel.org>
Mon, 27 Apr 2020 18:01:16 +0000 (11:01 -0700)
The rcu_state structure's gp_seq field is only to be modified by the RCU
grace-period kthread, which is single-threaded.  This commit therefore
enlists KCSAN's help in enforcing this restriction.  This commit applies
KCSAN-specific primitives, so cannot go upstream until KCSAN does.

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c

index 0132bc6..183b9cf 100644 (file)
@@ -1230,7 +1230,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp_start, struct rcu_data *rdp,
                trace_rcu_this_gp(rnp, rdp, gp_seq_req, TPS("NoGPkthread"));
                goto unlock_out;
        }
-       trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("newreq"));
+       trace_rcu_grace_period(rcu_state.name, data_race(rcu_state.gp_seq), TPS("newreq"));
        ret = true;  /* Caller must wake GP kthread. */
 unlock_out:
        /* Push furthest requested GP to leaf node and rcu_data structure. */
@@ -1519,6 +1519,7 @@ static bool rcu_gp_init(void)
        record_gp_stall_check_time();
        /* Record GP times before starting GP, hence rcu_seq_start(). */
        rcu_seq_start(&rcu_state.gp_seq);
+       ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
        raw_spin_unlock_irq_rcu_node(rnp);
 
@@ -1805,6 +1806,7 @@ static void rcu_gp_cleanup(void)
        /* Declare grace period done, trace first to use old GP number. */
        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("end"));
        rcu_seq_end(&rcu_state.gp_seq);
+       ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
        rcu_state.gp_state = RCU_GP_IDLE;
        /* Check for GP requests since above loop. */
        rdp = this_cpu_ptr(&rcu_data);