rcu: Shrink each possible cpu krcp
author		Zqiang <qiang.zhang@windriver.com>
		Fri, 14 Aug 2020 06:45:57 +0000 (14:45 +0800)
committer	Paul E. McKenney <paulmck@kernel.org>
		Thu, 3 Sep 2020 16:40:13 +0000 (09:40 -0700)
CPUs can go offline shortly after kfree_call_rcu() has been invoked,
which can leave memory stranded until those CPUs come back online.
This commit therefore drains the krcp of each possible CPU, not just
the ones that happen to be online.

Acked-by: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Zqiang <qiang.zhang@windriver.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
kernel/rcu/tree.c

index 2323622..9245064 100644
@@ -3450,7 +3450,7 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
        unsigned long count = 0;
 
        /* Snapshot count of all CPUs */
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
                count += READ_ONCE(krcp->count);
@@ -3465,7 +3465,7 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        int cpu, freed = 0;
        unsigned long flags;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                int count;
                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
@@ -3498,7 +3498,7 @@ void __init kfree_rcu_scheduler_running(void)
        int cpu;
        unsigned long flags;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
                raw_spin_lock_irqsave(&krcp->lock, flags);
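
For illustration only, here is a minimal userspace sketch (not kernel code, and
not part of this patch) of the problem being fixed: the kfree_rcu caches are
per-CPU data indexed by possible CPU, so a shrinker that walks only online CPUs
never sees objects stranded on a CPU that has gone offline. The names
NR_POSSIBLE_CPUS, cache[], and online[] are illustrative stand-ins, not kernel
APIs.

/* Sketch, assuming a fixed set of possible CPUs and a per-CPU object cache. */
#include <stdbool.h>
#include <stdio.h>

#define NR_POSSIBLE_CPUS 4

static unsigned long cache[NR_POSSIBLE_CPUS];	/* objects queued per CPU */
static bool online[NR_POSSIBLE_CPUS] = { true, true, true, true };

/* Walk only online CPUs, as the pre-patch shrinker did. */
static unsigned long count_online(void)
{
	unsigned long count = 0;

	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		if (online[cpu])
			count += cache[cpu];
	return count;
}

/* Walk every possible CPU, as the patched shrinker does. */
static unsigned long count_possible(void)
{
	unsigned long count = 0;

	for (int cpu = 0; cpu < NR_POSSIBLE_CPUS; cpu++)
		count += cache[cpu];
	return count;
}

int main(void)
{
	cache[3] = 128;		/* work was queued on CPU 3 ...           */
	online[3] = false;	/* ... and then CPU 3 went offline.       */

	printf("online-only count:  %lu (stranded memory is invisible)\n",
	       count_online());
	printf("all-possible count: %lu\n", count_possible());
	return 0;
}

With only the online walk, the 128 stranded objects on the offline CPU are
never counted or freed until that CPU comes back online; walking all possible
CPUs, as the patch does in kfree_rcu_shrink_count(), kfree_rcu_shrink_scan(),
and kfree_rcu_scheduler_running(), makes them visible to the shrinker.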