rcu: Apply callbacks processing time limit only on softirq
Author:     Frederic Weisbecker <frederic@kernel.org>
AuthorDate: Tue, 19 Oct 2021 00:08:15 +0000 (02:08 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Thu, 12 May 2022 10:30:26 +0000 (12:30 +0200)
commit a554ba288845fd3f6f12311fd76a51694233458a upstream.

The time limit only makes sense when callbacks are serviced in softirq
mode because:

_ In case we need to get back to the scheduler,
  cond_resched_tasks_rcu_qs() is called after each callback.

_ In case some other softirq vector needs the CPU, the call to
  local_bh_enable() before cond_resched_tasks_rcu_qs() takes care of
  them via a call to do_softirq().

Therefore, make sure the time limit only applies to softirq mode.

Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
[UR: backport to 5.15-stable]
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
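
For orientation, here is a condensed sketch of the per-callback loop in
rcu_do_batch() as it reads with this patch applied. This is an
illustration only, not the verbatim 5.15 code: locking, tracing, and
callback accounting are omitted, and the softirq exit test is
abbreviated to need_resched().

	/*
	 * Condensed sketch of the per-callback loop in rcu_do_batch()
	 * after this patch (illustration only).
	 */
	rhp = rcu_cblist_dequeue(&rcl);
	for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
		rhp->func(rhp);		/* invoke the callback */
		count++;

		if (in_serving_softirq()) {
			/* Nothing preempts a softirq, so leave voluntarily. */
			if (count >= bl && need_resched())
				break;
			/* The time limit applies here, and only here. */
			if (unlikely(tlimit) && !(count & 31) &&
			    local_clock() >= tlimit)
				break;
		} else {
			/*
			 * Kthread mode: re-enabling BH runs any pending
			 * softirq vectors, and cond_resched_tasks_rcu_qs()
			 * yields to the scheduler, so no explicit time
			 * limit is needed.
			 */
			local_bh_enable();
			cond_resched_tasks_rcu_qs();
			local_bh_disable();
		}
	}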
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index a968cc6..a4a9d68 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -2476,7 +2476,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
        div = READ_ONCE(rcu_divisor);
        div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
        bl = max(rdp->blimit, pending >> div);
-       if (unlikely(bl > 100)) {
+       if (in_serving_softirq() && unlikely(bl > 100)) {
                long rrn = READ_ONCE(rcu_resched_ns);
 
                rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC : rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
@@ -2517,6 +2517,18 @@ static void rcu_do_batch(struct rcu_data *rdp)
                        if (count >= bl && (need_resched() ||
                                        (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
                                break;
+
+                       /*
+                        * Make sure we don't spend too much time here and deprive other
+                        * softirq vectors of CPU cycles.
+                        */
+                       if (unlikely(tlimit)) {
+                               /* only call local_clock() every 32 callbacks */
+                               if (likely((count & 31) || local_clock() < tlimit))
+                                       continue;
+                               /* Exceeded the time limit, so leave. */
+                               break;
+                       }
                } else {
                        local_bh_enable();
                        lockdep_assert_irqs_enabled();
@@ -2524,18 +2536,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
                        lockdep_assert_irqs_enabled();
                        local_bh_disable();
                }
-
-               /*
-                * Make sure we don't spend too much time here and deprive other
-                * softirq vectors of CPU cycles.
-                */
-               if (unlikely(tlimit)) {
-                       /* only call local_clock() every 32 callbacks */
-                       if (likely((count & 31) || local_clock() < tlimit))
-                               continue;
-                       /* Exceeded the time limit, so leave. */
-                       break;
-               }
        }
 
        local_irq_save(flags);
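
For a sense of when the limit actually arms, here is a sketch
reconstructed from the first hunk above. The enclosing function and
declarations are elided, and the worked numbers assume the usual
defaults (rcu_divisor == 7, blimit == 10):

	/*
	 * Worked example with the defaults:
	 *   pending = 25600  ->  bl = max(10, 25600 >> 7) = 200
	 *   bl > 100 and in_serving_softirq()  ->  time limit armed.
	 */
	div = READ_ONCE(rcu_divisor);
	/*
	 * A negative divisor falls back to the default of 7; large values
	 * are capped at BITS_PER_LONG - 2 so the shift stays defined.
	 */
	div = div < 0 ? 7 : div > sizeof(long) * 8 - 2 ? sizeof(long) * 8 - 2 : div;
	bl = max(rdp->blimit, pending >> div);
	if (in_serving_softirq() && unlikely(bl > 100)) {
		long rrn = READ_ONCE(rcu_resched_ns);

		/* Clamp the budget to [1 ms, 1 s] of local_clock() time. */
		rrn = rrn < NSEC_PER_MSEC ? NSEC_PER_MSEC :
		      rrn > NSEC_PER_SEC ? NSEC_PER_SEC : rrn;
		tlimit = local_clock() + rrn;	/* deadline in ns */
	}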