rcu: Fix callbacks processing time limit retaining cond_resched()
Author:     Frederic Weisbecker <frederic@kernel.org>
AuthorDate: Tue, 19 Oct 2021 00:08:14 +0000 (02:08 +0200)
Committer:  Paul E. McKenney <paulmck@kernel.org>
CommitDate: Wed, 8 Dec 2021 00:24:44 +0000 (16:24 -0800)
The callbacks processing time limit makes sure we are not exceeding a
given amount of time executing the queue.

However, its "continue" clause bypasses the cond_resched() call on
rcuc and NOCB kthreads, delaying it until the time limit is reached,
which can take a very long time...

Make sure the scheduler has a higher priority than the time limit.

Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Valentin Schneider <valentin.schneider@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Neeraj Upadhyay <neeraju@codeaurora.org>
Cc: Uladzislau Rezki <urezki@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c

index 4f4d9ea..74e210c 100644 (file)
@@ -2525,9 +2525,21 @@ static void rcu_do_batch(struct rcu_data *rdp)
                /*
                 * Stop only if limit reached and CPU has something to do.
                 */
-               if (count >= bl && in_serving_softirq() &&
-                   (need_resched() || !is_idle_task(current)))
-                       break;
+               if (in_serving_softirq()) {
+                       if (count >= bl && (need_resched() || !is_idle_task(current)))
+                               break;
+               } else {
+                       local_bh_enable();
+                       lockdep_assert_irqs_enabled();
+                       cond_resched_tasks_rcu_qs();
+                       lockdep_assert_irqs_enabled();
+                       local_bh_disable();
+               }
+
+               /*
+                * Make sure we don't spend too much time here and deprive other
+                * softirq vectors of CPU cycles.
+                */
                if (unlikely(tlimit)) {
                        /* only call local_clock() every 32 callbacks */
                        if (likely((count & 31) || local_clock() < tlimit))
@@ -2535,13 +2547,6 @@ static void rcu_do_batch(struct rcu_data *rdp)
                        /* Exceeded the time limit, so leave. */
                        break;
                }
-               if (!in_serving_softirq()) {
-                       local_bh_enable();
-                       lockdep_assert_irqs_enabled();
-                       cond_resched_tasks_rcu_qs();
-                       lockdep_assert_irqs_enabled();
-                       local_bh_disable();
-               }
        }
 
        rcu_nocb_lock_irqsave(rdp, flags);