rcu: Make synchronize_rcu() fast path update ->gp_seq counters
author Paul E. McKenney <paulmck@kernel.org>
Fri, 5 Aug 2022 00:28:01 +0000 (17:28 -0700)
committer Paul E. McKenney <paulmck@kernel.org>
Wed, 31 Aug 2022 12:09:21 +0000 (05:09 -0700)
This commit causes the early boot single-CPU synchronize_rcu() fastpath to
update the rcu_state and rcu_node structures' ->gp_seq and ->gp_seq_needed
counters.  This will allow the full-state polled grace-period APIs to
detect all normal grace periods without the need to track the special
combined polling-only counter, which is a step towards removing the
->rgos_polled field from the rcu_gp_oldstate structure, thereby reducing its
size by one third.
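
For background, the polled APIs decide grace-period completion by comparing a
snapshot against the current ->gp_seq using the sequence-counter helpers in
kernel/rcu/rcu.h.  The sketch below is a simplified rendering of those helpers
(names and constants follow mainline, but treat the bodies as illustrative);
because the fastpath now advances ->gp_seq by 1 << RCU_SEQ_CTR_SHIFT while the
low-order state bits are zero, any snapshot taken beforehand via rcu_seq_snap()
will satisfy rcu_seq_done() afterward:

	#define RCU_SEQ_CTR_SHIFT	2
	#define RCU_SEQ_STATE_MASK	((1 << RCU_SEQ_CTR_SHIFT) - 1)

	/* Return the ->gp_seq value at which a full grace period will have elapsed. */
	static inline unsigned long rcu_seq_snap(unsigned long *sp)
	{
		unsigned long s;

		s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
		smp_mb(); /* Above access must not bleed into critical section. */
		return s;
	}

	/* Has a full grace period elapsed since the snapshot "s"? */
	static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
	{
		return ULONG_CMP_GE(READ_ONCE(*sp), s);
	}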

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c

index 0ff7d5e..8fa5ec0 100644
@@ -3480,24 +3480,37 @@ static int rcu_blocking_is_gp(void)
  */
 void synchronize_rcu(void)
 {
+       unsigned long flags;
+       struct rcu_node *rnp;
+
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
-       if (rcu_blocking_is_gp()) {
-               // Note well that this code runs with !PREEMPT && !SMP.
-               // In addition, all code that advances grace periods runs at
-               // process level.  Therefore, this normal GP overlaps with
-               // other normal GPs only by being fully nested within them,
-               // which allows reuse of ->gp_seq_polled_snap.
-               rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
-               rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
-               return;  // Context allows vacuous grace periods.
+       if (!rcu_blocking_is_gp()) {
+               if (rcu_gp_is_expedited())
+                       synchronize_rcu_expedited();
+               else
+                       wait_rcu_gp(call_rcu);
+               return;
        }
-       if (rcu_gp_is_expedited())
-               synchronize_rcu_expedited();
-       else
-               wait_rcu_gp(call_rcu);
+
+       // Context allows vacuous grace periods.
+       // Note well that this code runs with !PREEMPT && !SMP.
+       // In addition, all code that advances grace periods runs at
+       // process level.  Therefore, this normal GP overlaps with other
+       // normal GPs only by being fully nested within them, which allows
+       // reuse of ->gp_seq_polled_snap.
+       rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_snap);
+       rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_snap);
+
+       // Update the normal grace-period counters to record this grace period.
+       local_irq_save(flags);
+       WARN_ON_ONCE(num_online_cpus() > 1);
+       rcu_state.gp_seq += (1 << RCU_SEQ_CTR_SHIFT);
+       rcu_for_each_node_breadth_first(rnp)
+               rnp->gp_seq_needed = rnp->gp_seq = rcu_state.gp_seq;
+       local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
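
The intended effect can be pictured with the full-state polled API from the
same patch series; the caller below is hypothetical and only illustrates the
guarantee, with get_state_synchronize_rcu_full() and
poll_state_synchronize_rcu_full() being the APIs the commit message refers to:

	struct rcu_gp_oldstate rgos;

	get_state_synchronize_rcu_full(&rgos);	/* Snapshot before the grace period. */
	synchronize_rcu();			/* Early boot: takes the fastpath above. */
	WARN_ON_ONCE(!poll_state_synchronize_rcu_full(&rgos));

Because the fastpath now updates each rcu_node structure's ->gp_seq and
->gp_seq_needed, the _full() check can rely on the normal counters alone
rather than on the combined polling-only ->gp_seq_polled bookkeeping.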