	}
}
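+/* Empty function used only to wait for lingering IPI handlers to complete. */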
+static void rcu_tasks_trace_empty_fn(void *unused)
+{
+}
+
/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
+	int cpu;
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

+	// Wait for any lingering IPI handlers to complete. Note that
+	// if a CPU has gone offline or transitioned to userspace in the
+	// meantime, all IPI handlers should have been drained beforehand.
+	// Yes, this assumes that CPUs process IPIs in order. If that ever
+	// changes, there will need to be a recheck and/or timed wait.
+	for_each_online_cpu(cpu)
+		if (smp_load_acquire(per_cpu_ptr(&trc_ipi_to_cpu, cpu)))
+			smp_call_function_single(cpu, rcu_tasks_trace_empty_fn, NULL, 1);
+
	// Remove the safety count.
	smp_mb__before_atomic(); // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);