static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
				      struct rcu_data *rdp,
				      unsigned long flags);
-static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
+static void rcu_spawn_all_nocb_kthreads(int cpu);
+static void __init rcu_spawn_nocb_kthreads(void);
+#ifdef CONFIG_RCU_NOCB_CPU
+static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
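
The spawning rework above replaces the single boot-time rcu_spawn_nocb_kthreads(rsp) with a per-CPU entry point plus a boot-time wrapper, so that rcuo kthreads can also be created when a no-CBs CPU comes online later. A minimal sketch of how the two new functions might fit together follows; rcu_spawn_one_nocb_kthread() and the rcu_scheduler_fully_active guard are assumptions, not shown in this hunk:

/*
 * Sketch (assumed, not part of this hunk): spawn rcuo kthreads for
 * one CPU, one per RCU flavor, once the scheduler is up.
 */
static void rcu_spawn_all_nocb_kthreads(int cpu)
{
	struct rcu_state *rsp;

	if (rcu_scheduler_fully_active)		/* assumed guard */
		for_each_rcu_flavor(rsp)
			rcu_spawn_one_nocb_kthread(rsp, cpu); /* assumed helper */
}

/* Sketch: at boot, cover the CPUs that are already online. */
static void __init rcu_spawn_nocb_kthreads(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		rcu_spawn_all_nocb_kthreads(cpu);
}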
/* Do a deferred wakeup of rcu_nocb_kthread(). */
static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
{
+	int ndw;
+
	if (!rcu_nocb_need_deferred_wakeup(rdp))
		return;
-	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
-	wake_nocb_leader(rdp, false);
-	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+	ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
+	ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
+	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
}

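do_nocb_deferred_wakeup() now distinguishes wakeup strengths rather than testing a simple flag: it reads the level once, clears it, and only then issues a wakeup of matching strength. The level values are presumably defined along these lines (the names come from the code above; the exact numbering is an assumption):

/* Sketch of the assumed deferred-wakeup levels for ->nocb_defer_wakeup. */
#define RCU_NOGP_WAKE_NOT	0	/* no deferred wakeup pending */
#define RCU_NOGP_WAKE		1	/* defer a normal leader wakeup */
#define RCU_NOGP_WAKE_FORCE	2	/* defer a forced leader wakeup */
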
+void __init rcu_init_nohz(void)
+{
+	int cpu;
+	bool need_rcu_nocb_mask = true;
+	struct rcu_state *rsp;
+
+#ifdef CONFIG_RCU_NOCB_CPU_NONE
+	need_rcu_nocb_mask = false;
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_NONE */
+
+#if defined(CONFIG_NO_HZ_FULL)
+	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
+		need_rcu_nocb_mask = true;
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
+		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
+			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
+			return;
+		}
+		have_rcu_nocb_mask = true;
+	}
+	if (!have_rcu_nocb_mask)
+		return;
+
+#ifdef CONFIG_RCU_NOCB_CPU_ZERO
+	pr_info("\tOffload RCU callbacks from CPU 0\n");
+	cpumask_set_cpu(0, rcu_nocb_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
+#ifdef CONFIG_RCU_NOCB_CPU_ALL
+	pr_info("\tOffload RCU callbacks from all CPUs\n");
+	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
+#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
+#if defined(CONFIG_NO_HZ_FULL)
+	if (tick_nohz_full_running)
+		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) */
+
+	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
+		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
+		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
+			    rcu_nocb_mask);
+	}
+	cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
+	pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
+	if (rcu_nocb_poll)
+		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
+
+	for_each_rcu_flavor(rsp) {
+		for_each_cpu(cpu, rcu_nocb_mask) {
+			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+
+			/*
+			 * If there are early callbacks, they will need
+			 * to be moved to the nocb lists.
+			 */
+			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
+				     &rdp->nxtlist &&
+				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
+			init_nocb_callback_list(rdp);
+		}
+		rcu_organize_nocb_kthreads(rsp);
+	}
+}
+
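rcu_init_nohz() above relies on have_rcu_nocb_mask and rcu_nocb_mask already being populated if the user passed rcu_nocbs= on the kernel command line. That parsing lives elsewhere in tree_plugin.h; a sketch of the era's boot-parameter hook, for context (not part of this patch):

/* Sketch (assumed): parse "rcu_nocbs=1-7" style boot parameters. */
static int __init rcu_nocb_setup(char *str)
{
	alloc_bootmem_cpumask_var(&rcu_nocb_mask);
	have_rcu_nocb_mask = true;
	cpulist_parse(str, rcu_nocb_mask);
	return 1;
}
__setup("rcu_nocbs=", rcu_nocb_setup);
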
/* Initialize per-rcu_data variables for no-CBs CPUs. */
static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
{