rcu: Add accessor macros for the ->need_future_gp[] array
author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Wed, 18 Apr 2018 18:11:39 +0000 (11:11 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tue, 15 May 2018 17:29:18 +0000 (10:29 -0700)
Accessors for the ->need_future_gp[] array are currently open-coded,
which makes them difficult to change.  To improve maintainability, this
commit adds need_future_gp_mask() to compute the indexing mask from the
array size, need_future_gp_element() to access the element corresponding
to the specified grace-period number, and need_any_future_gp() to
determine if any future grace period is needed.  This commit also applies
need_future_gp_element() to existing open-coded single-element accesses.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h

index 4bbba17..79fb999 100644 (file)
@@ -718,11 +718,9 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_future_needs_gp(struct rcu_state *rsp)
 {
        struct rcu_node *rnp = rcu_get_root(rsp);
-       int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
-       int *fp = &rnp->need_future_gp[idx];
 
        lockdep_assert_irqs_disabled();
-       return READ_ONCE(*fp);
+       return READ_ONCE(need_future_gp_element(rnp, rnp->completed + 1));
 }
 
 /*
@@ -1699,7 +1697,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
         */
        c = rcu_cbs_completed(rdp->rsp, rnp);
        trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
-       if (rnp->need_future_gp[c & 0x1]) {
+       if (need_future_gp_element(rnp, c)) {
                trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
                goto out;
        }
@@ -1711,7 +1709,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
         * current grace period, we don't need to explicitly start one.
         */
        if (rnp->gpnum != rnp->completed) {
-               rnp->need_future_gp[c & 0x1]++;
+               need_future_gp_element(rnp, c)++;
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
                goto out;
        }
@@ -1737,13 +1735,13 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
         * If the needed for the required grace period is already
         * recorded, trace and leave.
         */
-       if (rnp_root->need_future_gp[c & 0x1]) {
+       if (need_future_gp_element(rnp_root, c)) {
                trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
                goto unlock_out;
        }
 
        /* Record the need for the future grace period. */
-       rnp_root->need_future_gp[c & 0x1]++;
+       need_future_gp_element(rnp_root, c)++;
 
        /* If a grace period is not already in progress, start one. */
        if (rnp_root->gpnum != rnp_root->completed) {
@@ -1771,8 +1769,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
        int needmore;
        struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
 
-       rnp->need_future_gp[c & 0x1] = 0;
-       needmore = rnp->need_future_gp[(c + 1) & 0x1];
+       need_future_gp_element(rnp, c) = 0;
+       needmore = need_future_gp_element(rnp, c + 1);
        trace_rcu_future_gp(rnp, rdp, c,
                            needmore ? TPS("CleanupMore") : TPS("Cleanup"));
        return needmore;
index f491ab4..18b0914 100644 (file)
@@ -159,6 +159,21 @@ struct rcu_node {
        wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;
 
+/* Accessors for ->need_future_gp[] array. */
+#define need_future_gp_mask() \
+       (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
+#define need_future_gp_element(rnp, c) \
+       ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
+#define need_any_future_gp(rnp)                                                \
+({                                                                     \
+       int __i;                                                        \
+       bool __nonzero = false;                                         \
+                                                                       \
+       for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)   \
+               __nonzero = __nonzero || (rnp)->need_future_gp[__i];    \
+       __nonzero;                                                      \
+})
+
 /*
  * Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
  * are indexed relative to this interval rather than the global CPU ID space.
index 84fbee4..640ea92 100644 (file)
@@ -1790,7 +1790,7 @@ static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
  */
 static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
 {
-       rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
+       need_future_gp_element(rnp, rnp->completed + 1) += nrq;
 }
 
 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)