rcu/nocb: Invert rcu_state.barrier_mutex VS hotplug lock locking order
author: Zqiang <qiang1.zhang@intel.com>
Tue, 19 Apr 2022 12:23:19 +0000 (14:23 +0200)
committer: Paul E. McKenney <paulmck@kernel.org>
Tue, 19 Jul 2022 18:42:55 +0000 (11:42 -0700)
In case of failure to spawn either rcuog or rcuo[p] kthreads for a given
rdp, rcu_nocb_rdp_deoffload() needs to be called with the hotplug
lock and the barrier_mutex held. However, the cpus write lock is already
held while calling rcutree_prepare_cpu(). It's not possible to call
rcu_nocb_rdp_deoffload() from there with just the barrier_mutex locked,
because this would result in a locking inversion against
rcu_nocb_cpu_deoffload(), which holds both locks in the reverse order.

Simply solve this by inverting the locking order inside
rcu_nocb_cpu_[de]offload(). This will be a prerequisite for toggling NOCB
states toward cpusets anyway.

Signed-off-by: Zqiang <qiang1.zhang@intel.com>
Cc: Neeraj Upadhyay <quic_neeraju@quicinc.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Uladzislau Rezki <uladzislau.rezki@sony.com>
Cc: Joel Fernandes <joel@joelfernandes.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Neeraj Upadhyay <quic_neeraju@quicinc.com>
kernel/rcu/tree_nocb.h

index dac7495..f2f2cab 100644 (file)
@@ -1055,8 +1055,8 @@ int rcu_nocb_cpu_deoffload(int cpu)
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        int ret = 0;
 
-       mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
+       mutex_lock(&rcu_state.barrier_mutex);
        if (rcu_rdp_is_offloaded(rdp)) {
                if (cpu_online(cpu)) {
                        ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
@@ -1067,8 +1067,8 @@ int rcu_nocb_cpu_deoffload(int cpu)
                        ret = -EINVAL;
                }
        }
-       cpus_read_unlock();
        mutex_unlock(&rcu_state.barrier_mutex);
+       cpus_read_unlock();
 
        return ret;
 }
@@ -1134,8 +1134,8 @@ int rcu_nocb_cpu_offload(int cpu)
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
        int ret = 0;
 
-       mutex_lock(&rcu_state.barrier_mutex);
        cpus_read_lock();
+       mutex_lock(&rcu_state.barrier_mutex);
        if (!rcu_rdp_is_offloaded(rdp)) {
                if (cpu_online(cpu)) {
                        ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
@@ -1146,8 +1146,8 @@ int rcu_nocb_cpu_offload(int cpu)
                        ret = -EINVAL;
                }
        }
-       cpus_read_unlock();
        mutex_unlock(&rcu_state.barrier_mutex);
+       cpus_read_unlock();
 
        return ret;
 }