static int rcu_future_needs_gp(struct rcu_state *rsp)
{
struct rcu_node *rnp = rcu_get_root(rsp);
- int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
- int *fp = &rnp->need_future_gp[idx];
lockdep_assert_irqs_disabled();
- return READ_ONCE(*fp);
+	return READ_ONCE(need_future_gp_element(rnp, rnp->completed + 1));
}
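
The conversion here is meant to be purely mechanical: with a two-element ->need_future_gp[] array, grace-period number c selects slot c & 0x1, so the slot consulted for the grace period after ->completed is exactly the one the deleted idx/fp lines computed by hand (hence the + 1 in the new expression). A minimal userspace sketch of that indexing (the mock_rnp type is an assumption of this example, not kernel code):

#include <assert.h>

/* Hypothetical stand-in for the two-slot ->need_future_gp[] ring. */
struct mock_rnp {
	unsigned long completed;	/* Number of the most recently finished GP. */
	int need_future_gp[2];		/* Requests for GP #c live in slot c & 0x1. */
};

int main(void)
{
	struct mock_rnp rnp = { .completed = 4 };

	/* A request for the next GP (#5) lands in slot 5 & 0x1 == 1... */
	rnp.need_future_gp[(rnp.completed + 1) & 0x1]++;
	/* ...the same slot the old idx/fp code would have read. */
	assert(rnp.need_future_gp[1] == 1);
	return 0;
}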
/*
 * Pick up the grace-period number for new callbacks.  If this
 * grace period is already marked as needed, return to the caller.
 */
c = rcu_cbs_completed(rdp->rsp, rnp);
trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
- if (rnp->need_future_gp[c & 0x1]) {
+ if (need_future_gp_element(rnp, c)) {
trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
goto out;
}
/*
 * If this rcu_node structure believes that a grace period is in
 * progress, then we must wait for the one following, which is in
 * "c".  Because our request will be noticed at the end of the
 * current grace period, we don't need to explicitly start one.
 */
if (rnp->gpnum != rnp->completed) {
- rnp->need_future_gp[c & 0x1]++;
+ need_future_gp_element(rnp, c)++;
trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
goto out;
}
/*
 * If the need for the required grace period is already
 * recorded, trace and leave.
*/
- if (rnp_root->need_future_gp[c & 0x1]) {
+ if (need_future_gp_element(rnp_root, c)) {
trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
goto unlock_out;
}
/* Record the need for the future grace period. */
- rnp_root->need_future_gp[c & 0x1]++;
+ need_future_gp_element(rnp_root, c)++;
/* If a grace period is not already in progress, start one. */
if (rnp_root->gpnum != rnp_root->completed) {

static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int c = rnp->completed;
	int needmore;
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
- rnp->need_future_gp[c & 0x1] = 0;
- needmore = rnp->need_future_gp[(c + 1) & 0x1];
+ need_future_gp_element(rnp, c) = 0;
+ needmore = need_future_gp_element(rnp, c + 1);
trace_rcu_future_gp(rnp, rdp, c,
needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}
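
The cleanup path hands the two-slot ring over one slot at a time: the slot for the grace period that just completed is zeroed, and the other slot (for c + 1) tells the caller whether yet another grace period must be started. A standalone sketch of that handoff, again on a hypothetical mock type rather than the kernel's:

#include <assert.h>

struct mock_rnp {
	unsigned long completed;	/* GP #completed has just finished. */
	int need_future_gp[2];
};

/* Mirrors the rcu_future_gp_cleanup() pattern on the mock type. */
static int mock_gp_cleanup(struct mock_rnp *rnp)
{
	unsigned long c = rnp->completed;
	int needmore;

	rnp->need_future_gp[c & 0x1] = 0;		/* GP #c is done. */
	needmore = rnp->need_future_gp[(c + 1) & 0x1];	/* GP #c+1 wanted? */
	return needmore;
}

int main(void)
{
	struct mock_rnp rnp = { .completed = 5 };
	int needmore;

	rnp.need_future_gp[5 & 0x1] = 1;	/* Request recorded for GP #5. */
	rnp.need_future_gp[6 & 0x1] = 1;	/* And another for GP #6. */
	needmore = mock_gp_cleanup(&rnp);
	assert(needmore == 1);			/* #5 done, #6 still needed. */
	return 0;
}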
wait_queue_head_t exp_wq[4];
} ____cacheline_internodealigned_in_smp;
+/* Accessors for ->need_future_gp[] array. */
+#define need_future_gp_mask() \
+ (ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
+#define need_future_gp_element(rnp, c) \
+ ((rnp)->need_future_gp[(c) & need_future_gp_mask()])
+#define need_any_future_gp(rnp) \
+({ \
+ int __i; \
+ bool __nonzero = false; \
+ \
+ for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++) \
+ __nonzero = __nonzero || (rnp)->need_future_gp[__i]; \
+ __nonzero; \
+})
+
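
These accessors compile unchanged in a userspace harness under gcc or clang (the ({ }) statement expression is a GNU extension), given a mock struct rcu_node and an ARRAY_SIZE stand-in, both of which are assumptions of this sketch rather than kernel code. Note that the mask is derived from the array size, so growing ->need_future_gp[] to any power-of-two length would leave every caller untouched:

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))	/* Userspace stand-in. */

/* Mock of the one field the accessors touch. */
struct rcu_node {
	int need_future_gp[2];
};

#define need_future_gp_mask() \
	(ARRAY_SIZE(((struct rcu_node *)NULL)->need_future_gp) - 1)
#define need_future_gp_element(rnp, c) \
	((rnp)->need_future_gp[(c) & need_future_gp_mask()])
#define need_any_future_gp(rnp)						\
({									\
	int __i;							\
	bool __nonzero = false;						\
									\
	for (__i = 0; __i < ARRAY_SIZE((rnp)->need_future_gp); __i++)	\
		__nonzero = __nonzero || (rnp)->need_future_gp[__i];	\
	__nonzero;							\
})

int main(void)
{
	struct rcu_node rnp = { { 0, 0 } };

	need_future_gp_element(&rnp, 5)++;	/* GP #5 maps to slot 5 & 1 == 1. */
	printf("slot1=%d any=%d\n",
	       rnp.need_future_gp[1], (int)need_any_future_gp(&rnp));
	return 0;
}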
/*
* Bitmasks in an rcu_node cover the interval [grplo, grphi] of CPU IDs, and
* are indexed relative to this interval rather than the global CPU ID space.