	atomic_long_t exp_workdone1;	/* # done by others #1. */
	atomic_long_t exp_workdone2;	/* # done by others #2. */
	atomic_long_t exp_workdone3;	/* # done by others #3. */
+	int exp_dynticks_snap;		/* Double-check need for IPI. */

	/* 7) Callback offloading. */
#ifdef CONFIG_RCU_NOCB_CPU
			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

+			rdp->exp_dynticks_snap =
+				atomic_add_return(0, &rdtp->dynticks);
			if (raw_smp_processor_id() == cpu ||
-			    !(atomic_add_return(0, &rdtp->dynticks) & 0x1) ||
+			    !(rdp->exp_dynticks_snap & 0x1) ||
			    !(rnp->qsmaskinitnext & rdp->grpmask))
				mask_ofl_test |= rdp->grpmask;
		}
		/* IPI the remaining CPUs for expedited quiescent state. */
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
+			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
+
			if (!(mask_ofl_ipi & mask))
				continue;
retry_ipi:
+			if (atomic_add_return(0, &rdtp->dynticks) !=
+			    rdp->exp_dynticks_snap) {
+				mask_ofl_test |= mask;
+				continue;
+			}
			ret = smp_call_function_single(cpu, func, rsp, 0);
			if (!ret) {
				mask_ofl_ipi &= ~mask;
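
The first hunk above adds the exp_dynticks_snap field to what appears to be struct rcu_data (kernel/rcu/tree.h); the remaining hunks use it in the expedited grace-period CPU-selection loops, apparently sync_rcu_exp_select_cpus() in kernel/rcu/tree_exp.h. The file and hunk headers are not reproduced here, and unchanged code between the hunks is elided. The change is a snapshot-and-recheck pattern: the per-CPU dynticks counter is snapshotted while CPUs are being selected, and just before an expedited IPI is sent the counter is re-read; if it has changed, the target CPU has passed through dyntick idle on its own, so the IPI is skipped and the CPU is instead reported through mask_ofl_test. The following is a minimal user-space sketch of that pattern, not kernel code: every name in it (cpu_state, dynticks_ctr, snap, the two helper functions) is invented for illustration, and plain C11 atomic loads stand in for the kernel's atomic_add_return(0, ...) reads, which additionally provide full memory-barrier ordering.

/*
 * Illustrative sketch only: names and helpers below do not exist in
 * the kernel; see the lead-in text above for the assumptions made.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct cpu_state {
	atomic_int dynticks_ctr;	/* Odd: CPU is non-idle; even: idle. */
	int snap;			/* Snapshot taken during CPU selection. */
};

/* Selection pass: snapshot the counter and decide whether an IPI is needed. */
bool select_for_ipi(struct cpu_state *cs)
{
	cs->snap = atomic_load(&cs->dynticks_ctr);
	return cs->snap & 0x1;		/* Even (idle) CPUs need no IPI. */
}

/* IPI pass: send the IPI only if the counter has not moved since the
 * snapshot; any change means the CPU already passed through idle. */
bool still_needs_ipi(struct cpu_state *cs)
{
	return atomic_load(&cs->dynticks_ctr) == cs->snap;
}

Note that in the patch a changed counter does not merely suppress the IPI: the CPU's bit is folded into mask_ofl_test so its quiescent state is still reported; the sketch shows only the detection step.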