rcu: Add assertions verifying blocked-tasks list
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 12 Jul 2017 04:52:31 +0000 (21:52 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Thu, 17 Aug 2017 14:26:23 +0000 (07:26 -0700)
This commit adds assertions verifying the consistency of the rcu_node
structure's ->blkd_tasks list and its ->gp_tasks, ->exp_tasks, and
->boost_tasks pointers.  In particular, the ->blkd_tasks lists must be
empty except for leaf rcu_node structures.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
kernel/rcu/tree.c
kernel/rcu/tree_plugin.h

index 2b37f1a..ac2617d 100644 (file)
@@ -2410,6 +2410,8 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                        return;
                }
                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
+               WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1 &&
+                            rcu_preempt_blocked_readers_cgp(rnp));
                rnp->qsmask &= ~mask;
                trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
                                                 mask, rnp->qsmask, rnp->level,
index 3e3f92e..eadf8b9 100644 (file)
@@ -180,6 +180,8 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
        struct task_struct *t = current;
 
        lockdep_assert_held(&rnp->lock);
+       WARN_ON_ONCE(rdp->mynode != rnp);
+       WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
 
        /*
         * Decide where to queue the newly blocked task.  In theory,
@@ -261,6 +263,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
                rnp->gp_tasks = &t->rcu_node_entry;
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
+       WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=
+                    !(rnp->qsmask & rdp->grpmask));
+       WARN_ON_ONCE(!(blkd_state & RCU_EXP_BLKD) !=
+                    !(rnp->expmask & rdp->grpmask));
        raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
        /*
@@ -482,6 +488,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                rnp = t->rcu_blocked_node;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                WARN_ON_ONCE(rnp != t->rcu_blocked_node);
+               WARN_ON_ONCE(rnp->level != rcu_num_lvls - 1);
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
                empty_exp = sync_rcu_preempt_exp_done(rnp);
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
@@ -495,10 +502,10 @@ void rcu_read_unlock_special(struct task_struct *t)
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
                if (IS_ENABLED(CONFIG_RCU_BOOST)) {
-                       if (&t->rcu_node_entry == rnp->boost_tasks)
-                               rnp->boost_tasks = np;
                        /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
                        drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+                       if (&t->rcu_node_entry == rnp->boost_tasks)
+                               rnp->boost_tasks = np;
                }
 
                /*