rcu-tasks: Simplify trc_read_check_handler() atomic operations
author Paul E. McKenney <paulmck@kernel.org>
Wed, 28 Jul 2021 17:53:41 +0000 (10:53 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 12 Jan 2023 10:58:49 +0000 (11:58 +0100)
commit 96017bf9039763a2e02dcc6adaa18592cd73a39d upstream.

Currently, trc_wait_for_one_reader() atomically increments
the trc_n_readers_need_end counter before sending the IPI
that invokes trc_read_check_handler().  All failure paths out of
trc_read_check_handler(), as well as the failure path of the
smp_call_function_single() call within trc_wait_for_one_reader(),
must then carefully decrement this counter atomically.  This is more
complex than it needs to be.
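
As a rough standalone sketch of that old bookkeeping pattern (using
C11 atomics in place of the kernel's atomic_t API; the names here are
hypothetical, and the counter bias and wakeup handling of the real
code are omitted):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int n_readers_need_end; // Stand-in for trc_n_readers_need_end.

	// Old pattern: the caller increments up front, so every failure
	// path, in the handler and in the IPI sender alike, must
	// decrement to keep the counter balanced.
	static void handler_old(bool in_reader)
	{
		if (!in_reader) {
			atomic_fetch_sub(&n_readers_need_end, 1); // Undo the caller's increment.
			return;
		}
		// Success: the caller's increment stands; this reader will be waited on.
	}

	static void send_ipi_old(bool ipi_delivered, bool in_reader)
	{
		atomic_fetch_add(&n_readers_need_end, 1); // Increment before the IPI.
		if (!ipi_delivered) {
			atomic_fetch_sub(&n_readers_need_end, 1); // Another path that must undo it.
			return;
		}
		handler_old(in_reader); // Stands in for the IPI running the handler.
	}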

This commit therefore simplifies things and saves a few lines of
code by dispensing with the atomic decrements in favor of having
trc_read_check_handler() do the atomic increment only in the success case.
In theory, this represents no change in functionality.
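
Continuing the same hypothetical sketch, the simplified pattern
increments only on the one path that actually needs the grace-period
kthread to keep waiting, so the failure paths carry no cleanup:

	// New pattern: only the success path touches the counter.
	static void handler_new(bool in_reader)
	{
		if (!in_reader)
			return; // Not in a read-side critical section: nothing to undo.
		atomic_fetch_add(&n_readers_need_end, 1); // One more to wait on.
	}

	static void send_ipi_new(bool ipi_delivered, bool in_reader)
	{
		if (!ipi_delivered)
			return; // IPI failure likewise has nothing to undo.
		handler_new(in_reader);
	}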

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Cc: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
kernel/rcu/tasks.h

index ae83960..4bd07cc 100644
@@ -892,32 +892,24 @@ static void trc_read_check_handler(void *t_in)
 
        // If the task is no longer running on this CPU, leave.
        if (unlikely(texp != t)) {
-               if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-                       wake_up(&trc_wait);
                goto reset_ipi; // Already on holdout list, so will check later.
        }
 
        // If the task is not in a read-side critical section, and
        // if this is the last reader, awaken the grace-period kthread.
        if (likely(!READ_ONCE(t->trc_reader_nesting))) {
-               if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-                       wake_up(&trc_wait);
-               // Mark as checked after decrement to avoid false
-               // positives on the above WARN_ON_ONCE().
                WRITE_ONCE(t->trc_reader_checked, true);
                goto reset_ipi;
        }
        // If we are racing with an rcu_read_unlock_trace(), try again later.
-       if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
-               if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
-                       wake_up(&trc_wait);
+       if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0))
                goto reset_ipi;
-       }
        WRITE_ONCE(t->trc_reader_checked, true);
 
        // Get here if the task is in a read-side critical section.  Set
        // its state so that it will awaken the grace-period kthread upon
        // exit from that critical section.
+       atomic_inc(&trc_n_readers_need_end); // One more to wait on.
        WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
        WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
 
@@ -1017,21 +1009,15 @@ static void trc_wait_for_one_reader(struct task_struct *t,
                if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
                        return;
 
-               atomic_inc(&trc_n_readers_need_end);
                per_cpu(trc_ipi_to_cpu, cpu) = true;
                t->trc_ipi_to_cpu = cpu;
                rcu_tasks_trace.n_ipis++;
-               if (smp_call_function_single(cpu,
-                                            trc_read_check_handler, t, 0)) {
+               if (smp_call_function_single(cpu, trc_read_check_handler, t, 0)) {
                        // Just in case there is some other reason for
                        // failure than the target CPU being offline.
                        rcu_tasks_trace.n_ipis_fails++;
                        per_cpu(trc_ipi_to_cpu, cpu) = false;
                        t->trc_ipi_to_cpu = cpu;
-                       if (atomic_dec_and_test(&trc_n_readers_need_end)) {
-                               WARN_ON_ONCE(1);
-                               wake_up(&trc_wait);
-                       }
                }
        }
 }