rcu: Implement rcu_segcblist_is_offloaded() config dependent
author	Frederic Weisbecker <frederic@kernel.org>
Mon, 21 Sep 2020 12:43:40 +0000 (14:43 +0200)
committer	Paul E. McKenney <paulmck@kernel.org>
Fri, 20 Nov 2020 03:37:16 +0000 (19:37 -0800)
This commit simplifies the use of the rcu_segcblist_is_offloaded() API so
that its callers no longer need to check the RCU_NOCB_CPU Kconfig option.
Note that rcu_segcblist_is_offloaded() is defined in the header file,
which means that the generated code should be just as efficient as before.

Suggested-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/rcu_segcblist.h
kernel/rcu/tree.c

index 5c293af..492262b 100644 (file)
@@ -62,7 +62,7 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
 /* Is the specified rcu_segcblist offloaded?  */
 static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
 {
-       return rsclp->offloaded;
+       return IS_ENABLED(CONFIG_RCU_NOCB_CPU) && rsclp->offloaded;
 }
 
 /*
index 93e1808..0ccdca4 100644 (file)
@@ -1603,8 +1603,7 @@ static bool __note_gp_changes(struct rcu_node *rnp, struct rcu_data *rdp)
 {
        bool ret = false;
        bool need_qs;
-       const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-                              rcu_segcblist_is_offloaded(&rdp->cblist);
+       const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
 
        raw_lockdep_assert_held_rcu_node(rnp);
 
@@ -2048,8 +2047,7 @@ static void rcu_gp_cleanup(void)
                needgp = true;
        }
        /* Advance CBs to reduce false positives below. */
-       offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-                   rcu_segcblist_is_offloaded(&rdp->cblist);
+       offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
        if ((offloaded || !rcu_accelerate_cbs(rnp, rdp)) && needgp) {
                WRITE_ONCE(rcu_state.gp_flags, RCU_GP_FLAG_INIT);
                WRITE_ONCE(rcu_state.gp_req_activity, jiffies);
@@ -2248,8 +2246,7 @@ rcu_report_qs_rdp(struct rcu_data *rdp)
        unsigned long flags;
        unsigned long mask;
        bool needwake = false;
-       const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-                              rcu_segcblist_is_offloaded(&rdp->cblist);
+       const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
        struct rcu_node *rnp;
 
        WARN_ON_ONCE(rdp->cpu != smp_processor_id());
@@ -2417,8 +2414,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
 {
        int div;
        unsigned long flags;
-       const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-                              rcu_segcblist_is_offloaded(&rdp->cblist);
+       const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
        struct rcu_head *rhp;
        struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
        long bl, count;
@@ -2675,8 +2671,7 @@ static __latent_entropy void rcu_core(void)
        unsigned long flags;
        struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
-       const bool offloaded = IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-                              rcu_segcblist_is_offloaded(&rdp->cblist);
+       const bool offloaded = rcu_segcblist_is_offloaded(&rdp->cblist);
 
        if (cpu_is_offline(smp_processor_id()))
                return;
@@ -2978,8 +2973,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func)
                                   rcu_segcblist_n_cbs(&rdp->cblist));
 
        /* Go handle any RCU core processing required. */
-       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU) &&
-           unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
+       if (unlikely(rcu_segcblist_is_offloaded(&rdp->cblist))) {
                __call_rcu_nocb_wake(rdp, was_alldone, flags); /* unlocks */
        } else {
                __call_rcu_core(rdp, head, flags);
@@ -3712,8 +3706,7 @@ static int rcu_pending(int user)
 
        /* Has RCU gone idle with this CPU needing another grace period? */
        if (!gp_in_progress && rcu_segcblist_is_enabled(&rdp->cblist) &&
-           (!IS_ENABLED(CONFIG_RCU_NOCB_CPU) ||
-            !rcu_segcblist_is_offloaded(&rdp->cblist)) &&
+           !rcu_segcblist_is_offloaded(&rdp->cblist) &&
            !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
                return 1;