rcu: Simplify the calculation of rcu_state.ncpus
authorWei Yang <richard.weiyang@gmail.com>
Sun, 19 Apr 2020 21:57:15 +0000 (21:57 +0000)
committerPaul E. McKenney <paulmck@kernel.org>
Mon, 29 Jun 2020 18:58:49 +0000 (11:58 -0700)
Because mask has exactly one bit set, the only possible difference between
oldmask and the updated rnp->expmaskinitnext is at the position of that
single bit, so the XOR-plus-bitmap_weight() computation can only ever
yield 0 or 1.  This commit therefore replaces that computation with a
simple boolean test of whether the bit in mask was already set in
rnp->expmaskinitnext, incrementing rcu_state.ncpus only when it was not.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c

index 6c6569e..bef1dc9 100644 (file)
@@ -3842,10 +3842,9 @@ void rcu_cpu_starting(unsigned int cpu)
 {
        unsigned long flags;
        unsigned long mask;
-       int nbits;
-       unsigned long oldmask;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
+       bool newcpu;
 
        if (per_cpu(rcu_cpu_started, cpu))
                return;
@@ -3857,12 +3856,10 @@ void rcu_cpu_starting(unsigned int cpu)
        mask = rdp->grpmask;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        WRITE_ONCE(rnp->qsmaskinitnext, rnp->qsmaskinitnext | mask);
-       oldmask = rnp->expmaskinitnext;
+       newcpu = !(rnp->expmaskinitnext & mask);
        rnp->expmaskinitnext |= mask;
-       oldmask ^= rnp->expmaskinitnext;
-       nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
        /* Allow lockless access for expedited grace periods. */
-       smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + nbits); /* ^^^ */
+       smp_store_release(&rcu_state.ncpus, rcu_state.ncpus + newcpu); /* ^^^ */
        ASSERT_EXCLUSIVE_WRITER(rcu_state.ncpus);
        rcu_gpnum_ovf(rnp, rdp); /* Offline-induced counter wrap? */
        rdp->rcu_onl_gp_seq = READ_ONCE(rcu_state.gp_seq);