1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
4  * Internal non-public definitions that provide either classic
5  * or preemptible semantics.
6  *
7  * Copyright Red Hat, 2009
8  * Copyright IBM Corporation, 2009
9  * Copyright SUSE, 2021
10  *
11  * Author: Ingo Molnar <mingo@elte.hu>
12  *         Paul E. McKenney <paulmck@linux.ibm.com>
13  *         Frederic Weisbecker <frederic@kernel.org>
14  */
15
16 #ifdef CONFIG_RCU_NOCB_CPU
17 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
18 static bool __read_mostly rcu_nocb_poll;    /* Offload kthreads are to poll. */
19 static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
20 {
21         return lockdep_is_held(&rdp->nocb_lock);
22 }
23
24 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
25 {
26         /* Race on early boot between thread creation and assignment */
27         if (!rdp->nocb_cb_kthread || !rdp->nocb_gp_kthread)
28                 return true;
29
30         if (current == rdp->nocb_cb_kthread || current == rdp->nocb_gp_kthread)
31                 if (in_task())
32                         return true;
33         return false;
34 }
35
36 /*
37  * Offload callback processing from the set of CPUs specified at boot time
38  * by rcu_nocb_mask.  For the CPUs in the set, there are kthreads
39  * created that pull the callbacks from the corresponding CPU, wait for
40  * a grace period to elapse, and invoke the callbacks.  These kthreads
41  * are organized into GP kthreads, which manage incoming callbacks, wait for
42  * grace periods, and awaken CB kthreads, and the CB kthreads, which only
43  * invoke callbacks.  Each GP kthread invokes its own CBs.  The no-CBs CPUs
44  * do a wake_up() on their GP kthread when they insert a callback into any
45  * empty list, unless the rcu_nocb_poll boot parameter has been specified,
46  * in which case each kthread actively polls its CPU.  (Which isn't so great
47  * for energy efficiency, but which does reduce RCU's overhead on that CPU.)
48  *
49  * This is intended to be used in conjunction with Frederic Weisbecker's
50  * adaptive-idle work, which would seriously reduce OS jitter on CPUs
51  * running CPU-bound user-mode computations.
52  *
53  * Offloading of callbacks can also be used as an energy-efficiency
54  * measure because CPUs with no RCU callbacks queued are more aggressive
55  * about entering dyntick-idle mode.
56  */
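/*
 * As a rough sketch of the resulting kthread topology (the exact group
 * size comes from rcu_nocb_gp_stride below, which defaults to roughly
 * sqrt(nr_cpu_ids)), a 16-CPU system with the default stride of 4 and
 * all CPUs offloaded might end up with:
 *
 *	rcuog/0  <->  rcuop/0   rcuop/1   rcuop/2   rcuop/3     (CPUs 0-3)
 *	rcuog/4  <->  rcuop/4   rcuop/5   rcuop/6   rcuop/7     (CPUs 4-7)
 *	rcuog/8  <->  rcuop/8   rcuop/9   rcuop/10  rcuop/11    (CPUs 8-11)
 *	rcuog/12 <->  rcuop/12  rcuop/13  rcuop/14  rcuop/15    (CPUs 12-15)
 *
 * Here each rcuog (GP) kthread waits for grace periods on behalf of its
 * group, and each rcuop (or rcuos, depending on the RCU flavor
 * abbreviation) CB kthread invokes the callbacks of its own CPU.
 */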
57
58
59 /*
60  * Parse the boot-time rcu_nocb_mask CPU list from the kernel parameters.
61  * If the list is invalid, a warning is emitted and all CPUs are offloaded.
62  */
63 static int __init rcu_nocb_setup(char *str)
64 {
65         alloc_bootmem_cpumask_var(&rcu_nocb_mask);
66         if (*str == '=') {
67                 if (cpulist_parse(++str, rcu_nocb_mask)) {
68                         pr_warn("rcu_nocbs= bad CPU range, all CPUs set\n");
69                         cpumask_setall(rcu_nocb_mask);
70                 }
71         }
72         rcu_state.nocb_is_setup = true;
73         return 1;
74 }
75 __setup("rcu_nocbs", rcu_nocb_setup);
76
77 static int __init parse_rcu_nocb_poll(char *arg)
78 {
79         rcu_nocb_poll = true;
80         return 0;
81 }
82 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
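/*
 * For example, booting a CONFIG_RCU_NOCB_CPU=y kernel with:
 *
 *	rcu_nocbs=1-7 rcu_nocb_poll
 *
 * offloads callback invocation for CPUs 1-7 to the rcuo/rcuog kthreads and
 * makes those kthreads poll for new callbacks instead of relying on wakeups
 * from call_rcu(), trading some energy efficiency for lower overhead on the
 * offloaded CPUs, as noted above.  (The list "1-7" is only an illustration;
 * any cpulist_parse()-compatible list works.)
 */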
83
84 /*
85  * Don't bother bypassing ->cblist if the call_rcu() rate is low.
86  * After all, the main point of bypassing is to avoid lock contention
87  * on ->nocb_lock, which can only happen at high call_rcu() rates.
88  */
89 static int nocb_nobypass_lim_per_jiffy = 16 * 1000 / HZ;
90 module_param(nocb_nobypass_lim_per_jiffy, int, 0);
91
92 /*
93  * Acquire the specified rcu_data structure's ->nocb_bypass_lock.  If the
94  * lock isn't immediately available, increment ->nocb_lock_contended to
95  * flag the contention.
96  */
97 static void rcu_nocb_bypass_lock(struct rcu_data *rdp)
98         __acquires(&rdp->nocb_bypass_lock)
99 {
100         lockdep_assert_irqs_disabled();
101         if (raw_spin_trylock(&rdp->nocb_bypass_lock))
102                 return;
103         atomic_inc(&rdp->nocb_lock_contended);
104         WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
105         smp_mb__after_atomic(); /* atomic_inc() before lock. */
106         raw_spin_lock(&rdp->nocb_bypass_lock);
107         smp_mb__before_atomic(); /* atomic_dec() after lock. */
108         atomic_dec(&rdp->nocb_lock_contended);
109 }
110
111 /*
112  * Spinwait until the specified rcu_data structure's ->nocb_lock is
113  * not contended.  Please note that this is extremely special-purpose,
114  * relying on the fact that at most two kthreads and one CPU contend for
115  * this lock, and also that the two kthreads are guaranteed to have frequent
116  * grace-period-duration time intervals between successive acquisitions
117  * of the lock.  This allows us to use an extremely simple throttling
118  * mechanism, and further to apply it only to the CPU doing floods of
119  * call_rcu() invocations.  Don't try this at home!
120  */
121 static void rcu_nocb_wait_contended(struct rcu_data *rdp)
122 {
123         WARN_ON_ONCE(smp_processor_id() != rdp->cpu);
124         while (WARN_ON_ONCE(atomic_read(&rdp->nocb_lock_contended)))
125                 cpu_relax();
126 }
127
128 /*
129  * Conditionally acquire the specified rcu_data structure's
130  * ->nocb_bypass_lock.
131  */
132 static bool rcu_nocb_bypass_trylock(struct rcu_data *rdp)
133 {
134         lockdep_assert_irqs_disabled();
135         return raw_spin_trylock(&rdp->nocb_bypass_lock);
136 }
137
138 /*
139  * Release the specified rcu_data structure's ->nocb_bypass_lock.
140  */
141 static void rcu_nocb_bypass_unlock(struct rcu_data *rdp)
142         __releases(&rdp->nocb_bypass_lock)
143 {
144         lockdep_assert_irqs_disabled();
145         raw_spin_unlock(&rdp->nocb_bypass_lock);
146 }
147
148 /*
149  * Acquire the specified rcu_data structure's ->nocb_lock, but only
150  * if it corresponds to a no-CBs CPU.
151  */
152 static void rcu_nocb_lock(struct rcu_data *rdp)
153 {
154         lockdep_assert_irqs_disabled();
155         if (!rcu_rdp_is_offloaded(rdp))
156                 return;
157         raw_spin_lock(&rdp->nocb_lock);
158 }
159
160 /*
161  * Release the specified rcu_data structure's ->nocb_lock, but only
162  * if it corresponds to a no-CBs CPU.
163  */
164 static void rcu_nocb_unlock(struct rcu_data *rdp)
165 {
166         if (rcu_rdp_is_offloaded(rdp)) {
167                 lockdep_assert_irqs_disabled();
168                 raw_spin_unlock(&rdp->nocb_lock);
169         }
170 }
171
172 /*
173  * Release the specified rcu_data structure's ->nocb_lock and restore
174  * interrupts, but only if it corresponds to a no-CBs CPU.
175  */
176 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
177                                        unsigned long flags)
178 {
179         if (rcu_rdp_is_offloaded(rdp)) {
180                 lockdep_assert_irqs_disabled();
181                 raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
182         } else {
183                 local_irq_restore(flags);
184         }
185 }
186
187 /* Lockdep check that ->cblist may be safely accessed. */
188 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
189 {
190         lockdep_assert_irqs_disabled();
191         if (rcu_rdp_is_offloaded(rdp))
192                 lockdep_assert_held(&rdp->nocb_lock);
193 }
194
195 /*
196  * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended
197  * grace period.
198  */
199 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
200 {
201         swake_up_all(sq);
202 }
203
204 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
205 {
206         return &rnp->nocb_gp_wq[rcu_seq_ctr(rnp->gp_seq) & 0x1];
207 }
208
209 static void rcu_init_one_nocb(struct rcu_node *rnp)
210 {
211         init_swait_queue_head(&rnp->nocb_gp_wq[0]);
212         init_swait_queue_head(&rnp->nocb_gp_wq[1]);
213 }
214
215 static bool __wake_nocb_gp(struct rcu_data *rdp_gp,
216                            struct rcu_data *rdp,
217                            bool force, unsigned long flags)
218         __releases(rdp_gp->nocb_gp_lock)
219 {
220         bool needwake = false;
221
222         if (!READ_ONCE(rdp_gp->nocb_gp_kthread)) {
223                 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
224                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
225                                     TPS("AlreadyAwake"));
226                 return false;
227         }
228
229         if (rdp_gp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
230                 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
231                 del_timer(&rdp_gp->nocb_timer);
232         }
233
234         if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {
235                 WRITE_ONCE(rdp_gp->nocb_gp_sleep, false);
236                 needwake = true;
237         }
238         raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
239         if (needwake) {
240                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DoWake"));
241                 wake_up_process(rdp_gp->nocb_gp_kthread);
242         }
243
244         return needwake;
245 }
246
247 /*
248  * Kick the GP kthread for this NOCB group.
249  */
250 static bool wake_nocb_gp(struct rcu_data *rdp, bool force)
251 {
252         unsigned long flags;
253         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
254
255         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
256         return __wake_nocb_gp(rdp_gp, rdp, force, flags);
257 }
258
259 /*
260  * Arrange to wake the GP kthread for this NOCB group at some future
261  * time when it is safe to do so.
262  */
263 static void wake_nocb_gp_defer(struct rcu_data *rdp, int waketype,
264                                const char *reason)
265 {
266         unsigned long flags;
267         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
268
269         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
270
271         /*
272          * Bypass wakeup overrides previous deferments. In case
273          * of callback storm, no need to wake up too early.
274          */
275         if (waketype == RCU_NOCB_WAKE_BYPASS) {
276                 mod_timer(&rdp_gp->nocb_timer, jiffies + 2);
277                 WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
278         } else {
279                 if (rdp_gp->nocb_defer_wakeup < RCU_NOCB_WAKE)
280                         mod_timer(&rdp_gp->nocb_timer, jiffies + 1);
281                 if (rdp_gp->nocb_defer_wakeup < waketype)
282                         WRITE_ONCE(rdp_gp->nocb_defer_wakeup, waketype);
283         }
284
285         raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
286
287         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, reason);
288 }
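/*
 * A rough summary of the deferred-wakeup levels used above and by
 * __call_rcu_nocb_wake(), in increasing order of urgency:
 *
 *	RCU_NOCB_WAKE_NOT	no deferred wakeup pending
 *	RCU_NOCB_WAKE_BYPASS	timer at jiffies + 2 (callback storm, relax)
 *	RCU_NOCB_WAKE		timer at jiffies + 1
 *	RCU_NOCB_WAKE_FORCE	timer at jiffies + 1, and the wakeup is
 *				forced when the timer handler finally runs
 */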
289
290 /*
291  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
292  * However, if there is a callback to be enqueued and if ->nocb_bypass
293  * proves to be initially empty, just return false because the no-CB GP
294  * kthread may need to be awakened in this case.
295  *
296  * Note that this function always returns true if rhp is NULL.
297  */
298 static bool rcu_nocb_do_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
299                                      unsigned long j)
300 {
301         struct rcu_cblist rcl;
302
303         WARN_ON_ONCE(!rcu_rdp_is_offloaded(rdp));
304         rcu_lockdep_assert_cblist_protected(rdp);
305         lockdep_assert_held(&rdp->nocb_bypass_lock);
306         if (rhp && !rcu_cblist_n_cbs(&rdp->nocb_bypass)) {
307                 raw_spin_unlock(&rdp->nocb_bypass_lock);
308                 return false;
309         }
310         /* Note: ->cblist.len already accounts for ->nocb_bypass contents. */
311         if (rhp)
312                 rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
313         rcu_cblist_flush_enqueue(&rcl, &rdp->nocb_bypass, rhp);
314         rcu_segcblist_insert_pend_cbs(&rdp->cblist, &rcl);
315         WRITE_ONCE(rdp->nocb_bypass_first, j);
316         rcu_nocb_bypass_unlock(rdp);
317         return true;
318 }
319
320 /*
321  * Flush the ->nocb_bypass queue into ->cblist, enqueuing rhp if non-NULL.
322  * However, if there is a callback to be enqueued and if ->nocb_bypass
323  * proves to be initially empty, just return false because the no-CB GP
324  * kthread may need to be awakened in this case.
325  *
326  * Note that this function always returns true if rhp is NULL.
327  */
328 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
329                                   unsigned long j)
330 {
331         if (!rcu_rdp_is_offloaded(rdp))
332                 return true;
333         rcu_lockdep_assert_cblist_protected(rdp);
334         rcu_nocb_bypass_lock(rdp);
335         return rcu_nocb_do_flush_bypass(rdp, rhp, j);
336 }
337
338 /*
339  * If the ->nocb_bypass_lock is immediately available, flush the
340  * ->nocb_bypass queue into ->cblist.
341  */
342 static void rcu_nocb_try_flush_bypass(struct rcu_data *rdp, unsigned long j)
343 {
344         rcu_lockdep_assert_cblist_protected(rdp);
345         if (!rcu_rdp_is_offloaded(rdp) ||
346             !rcu_nocb_bypass_trylock(rdp))
347                 return;
348         WARN_ON_ONCE(!rcu_nocb_do_flush_bypass(rdp, NULL, j));
349 }
350
351 /*
352  * See whether it is appropriate to use the ->nocb_bypass list in order
353  * to control contention on ->nocb_lock.  A limited number of direct
354  * enqueues are permitted into ->cblist per jiffy.  If ->nocb_bypass
355  * is non-empty, further callbacks must be placed into ->nocb_bypass,
356  * otherwise rcu_barrier() breaks.  Use rcu_nocb_flush_bypass() to switch
357  * back to direct use of ->cblist.  However, ->nocb_bypass should not be
358  * used if ->cblist is empty, because otherwise callbacks can be stranded
359  * on ->nocb_bypass because we cannot count on the current CPU ever again
360  * invoking call_rcu().  The general rule is that if ->nocb_bypass is
361  * non-empty, the corresponding no-CBs grace-period kthread must not be
362  * in an indefinite sleep state.
363  *
364  * Finally, it is not permitted to use the bypass during early boot,
365  * as doing so would confuse the auto-initialization code.  Besides
366  * which, there is no point in worrying about lock contention while
367  * there is only one CPU in operation.
368  */
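/*
 * In other words, rcu_nocb_try_bypass() below boils down to roughly the
 * following decision sequence for an incoming callback rhp:
 *
 *	if (not offloaded)		caller enqueues onto ->cblist, unlocked
 *	if ((de-)offload in progress)	take ->nocb_lock, caller enqueues
 *	if (early boot)			take ->nocb_lock, caller enqueues
 *	if (call_rcu() rate is low)	flush bypass, caller enqueues
 *	if (bypass too old or full)	flush bypass together with rhp
 *	otherwise			append rhp to ->nocb_bypass
 *
 * Returning false means the caller must enqueue rhp itself; returning
 * true means rhp has already been queued, either via the bypass or via
 * the flush.
 */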
369 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
370                                 bool *was_alldone, unsigned long flags)
371 {
372         unsigned long c;
373         unsigned long cur_gp_seq;
374         unsigned long j = jiffies;
375         long ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
376
377         lockdep_assert_irqs_disabled();
378
379         // Pure softirq/rcuc based processing: no bypassing, no
380         // locking.
381         if (!rcu_rdp_is_offloaded(rdp)) {
382                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
383                 return false;
384         }
385
386         // In the process of (de-)offloading: no bypassing, but
387         // locking.
388         if (!rcu_segcblist_completely_offloaded(&rdp->cblist)) {
389                 rcu_nocb_lock(rdp);
390                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
391                 return false; /* Not offloaded, no bypassing. */
392         }
393
394         // Don't use ->nocb_bypass during early boot.
395         if (rcu_scheduler_active != RCU_SCHEDULER_RUNNING) {
396                 rcu_nocb_lock(rdp);
397                 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
398                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
399                 return false;
400         }
401
402         // If we have advanced to a new jiffy, reset counts to allow
403         // moving back from ->nocb_bypass to ->cblist.
404         if (j == rdp->nocb_nobypass_last) {
405                 c = rdp->nocb_nobypass_count + 1;
406         } else {
407                 WRITE_ONCE(rdp->nocb_nobypass_last, j);
408                 c = rdp->nocb_nobypass_count - nocb_nobypass_lim_per_jiffy;
409                 if (ULONG_CMP_LT(rdp->nocb_nobypass_count,
410                                  nocb_nobypass_lim_per_jiffy))
411                         c = 0;
412                 else if (c > nocb_nobypass_lim_per_jiffy)
413                         c = nocb_nobypass_lim_per_jiffy;
414         }
415         WRITE_ONCE(rdp->nocb_nobypass_count, c);
416
417         // If there hasn't yet been all that many ->cblist enqueues
418         // this jiffy, tell the caller to enqueue onto ->cblist.  But flush
419         // ->nocb_bypass first.
420         if (rdp->nocb_nobypass_count < nocb_nobypass_lim_per_jiffy) {
421                 rcu_nocb_lock(rdp);
422                 *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
423                 if (*was_alldone)
424                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
425                                             TPS("FirstQ"));
426                 WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, j));
427                 WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
428                 return false; // Caller must enqueue the callback.
429         }
430
431         // If ->nocb_bypass has been used too long or is too full,
432         // flush ->nocb_bypass to ->cblist.
433         if ((ncbs && j != READ_ONCE(rdp->nocb_bypass_first)) ||
434             ncbs >= qhimark) {
435                 rcu_nocb_lock(rdp);
436                 if (!rcu_nocb_flush_bypass(rdp, rhp, j)) {
437                         *was_alldone = !rcu_segcblist_pend_cbs(&rdp->cblist);
438                         if (*was_alldone)
439                                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
440                                                     TPS("FirstQ"));
441                         WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
442                         return false; // Caller must enqueue the callback.
443                 }
444                 if (j != rdp->nocb_gp_adv_time &&
445                     rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
446                     rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
447                         rcu_advance_cbs_nowake(rdp->mynode, rdp);
448                         rdp->nocb_gp_adv_time = j;
449                 }
450                 rcu_nocb_unlock_irqrestore(rdp, flags);
451                 return true; // Callback already enqueued.
452         }
453
454         // We need to use the bypass.
455         rcu_nocb_wait_contended(rdp);
456         rcu_nocb_bypass_lock(rdp);
457         ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
458         rcu_segcblist_inc_len(&rdp->cblist); /* Must precede enqueue. */
459         rcu_cblist_enqueue(&rdp->nocb_bypass, rhp);
460         if (!ncbs) {
461                 WRITE_ONCE(rdp->nocb_bypass_first, j);
462                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("FirstBQ"));
463         }
464         rcu_nocb_bypass_unlock(rdp);
465         smp_mb(); /* Order enqueue before wake. */
466         if (ncbs) {
467                 local_irq_restore(flags);
468         } else {
469                 // No-CBs GP kthread might be indefinitely asleep, if so, wake.
470                 rcu_nocb_lock(rdp); // Rare during call_rcu() flood.
471                 if (!rcu_segcblist_pend_cbs(&rdp->cblist)) {
472                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
473                                             TPS("FirstBQwake"));
474                         __call_rcu_nocb_wake(rdp, true, flags);
475                 } else {
476                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
477                                             TPS("FirstBQnoWake"));
478                         rcu_nocb_unlock_irqrestore(rdp, flags);
479                 }
480         }
481         return true; // Callback already enqueued.
482 }
483
484 /*
485  * Awaken the no-CBs grace-period kthread if needed, either due to it
486  * legitimately being asleep or due to overload conditions.
487  *
488  * If warranted, also wake up the kthread servicing this CPU's queues.
489  */
490 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_alldone,
491                                  unsigned long flags)
492                                  __releases(rdp->nocb_lock)
493 {
494         unsigned long cur_gp_seq;
495         unsigned long j;
496         long len;
497         struct task_struct *t;
498
499         // If we are being polled or there is no kthread, just leave.
500         t = READ_ONCE(rdp->nocb_gp_kthread);
501         if (rcu_nocb_poll || !t) {
502                 rcu_nocb_unlock_irqrestore(rdp, flags);
503                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
504                                     TPS("WakeNotPoll"));
505                 return;
506         }
507         // Need to actually do a wakeup.
508         len = rcu_segcblist_n_cbs(&rdp->cblist);
509         if (was_alldone) {
510                 rdp->qlen_last_fqs_check = len;
511                 if (!irqs_disabled_flags(flags)) {
512                         /* ... if queue was empty ... */
513                         rcu_nocb_unlock_irqrestore(rdp, flags);
514                         wake_nocb_gp(rdp, false);
515                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
516                                             TPS("WakeEmpty"));
517                 } else {
518                         rcu_nocb_unlock_irqrestore(rdp, flags);
519                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE,
520                                            TPS("WakeEmptyIsDeferred"));
521                 }
522         } else if (len > rdp->qlen_last_fqs_check + qhimark) {
523                 /* ... or if many callbacks queued. */
524                 rdp->qlen_last_fqs_check = len;
525                 j = jiffies;
526                 if (j != rdp->nocb_gp_adv_time &&
527                     rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
528                     rcu_seq_done(&rdp->mynode->gp_seq, cur_gp_seq)) {
529                         rcu_advance_cbs_nowake(rdp->mynode, rdp);
530                         rdp->nocb_gp_adv_time = j;
531                 }
532                 smp_mb(); /* Enqueue before timer_pending(). */
533                 if ((rdp->nocb_cb_sleep ||
534                      !rcu_segcblist_ready_cbs(&rdp->cblist)) &&
535                     !timer_pending(&rdp->nocb_timer)) {
536                         rcu_nocb_unlock_irqrestore(rdp, flags);
537                         wake_nocb_gp_defer(rdp, RCU_NOCB_WAKE_FORCE,
538                                            TPS("WakeOvfIsDeferred"));
539                 } else {
540                         rcu_nocb_unlock_irqrestore(rdp, flags);
541                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
542                 }
543         } else {
544                 rcu_nocb_unlock_irqrestore(rdp, flags);
545                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WakeNot"));
546         }
547 }
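/*
 * For reference, the callbacks handled by the machinery above come from
 * ordinary call_rcu() users.  A minimal (made-up) example looks like:
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_rcu(&fp->rcu, foo_free_cb);
 *
 * On an offloaded CPU, call_rcu() first consults rcu_nocb_try_bypass()
 * and, if that declines to queue the callback, enqueues it onto ->cblist
 * and then lands in __call_rcu_nocb_wake().
 */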
548
549 static int nocb_gp_toggle_rdp(struct rcu_data *rdp,
550                                bool *wake_state)
551 {
552         struct rcu_segcblist *cblist = &rdp->cblist;
553         unsigned long flags;
554         int ret;
555
556         rcu_nocb_lock_irqsave(rdp, flags);
557         if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
558             !rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
559                 /*
560                  * Offloading. Set our flag and notify the offload worker.
561                  * We will handle this rdp until it gets de-offloaded.
562                  */
563                 rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_GP);
564                 if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
565                         *wake_state = true;
566                 ret = 1;
567         } else if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED) &&
568                    rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP)) {
569                 /*
570                  * De-offloading. Clear our flag and notify the de-offload worker.
571                  * We will ignore this rdp until it gets re-offloaded.
572                  */
573                 rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_GP);
574                 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB))
575                         *wake_state = true;
576                 ret = 0;
577         } else {
578                 WARN_ON_ONCE(1);
579                 ret = -1;
580         }
581
582         rcu_nocb_unlock_irqrestore(rdp, flags);
583
584         return ret;
585 }
586
587 static void nocb_gp_sleep(struct rcu_data *my_rdp, int cpu)
588 {
589         trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Sleep"));
590         swait_event_interruptible_exclusive(my_rdp->nocb_gp_wq,
591                                         !READ_ONCE(my_rdp->nocb_gp_sleep));
592         trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("EndSleep"));
593 }
594
595 /*
596  * No-CBs GP kthreads come here to wait for additional callbacks to show up
597  * or for grace periods to end.
598  */
599 static void nocb_gp_wait(struct rcu_data *my_rdp)
600 {
601         bool bypass = false;
602         long bypass_ncbs;
603         int __maybe_unused cpu = my_rdp->cpu;
604         unsigned long cur_gp_seq;
605         unsigned long flags;
606         bool gotcbs = false;
607         unsigned long j = jiffies;
608         bool needwait_gp = false; // This prevents actual uninitialized use.
609         bool needwake;
610         bool needwake_gp;
611         struct rcu_data *rdp, *rdp_toggling = NULL;
612         struct rcu_node *rnp;
613         unsigned long wait_gp_seq = 0; // Suppress "use uninitialized" warning.
614         bool wasempty = false;
615
616         /*
617          * Each pass through the following loop checks for CBs and for the
618          * nearest grace period (if any) to wait for next.  The CB kthreads
619          * and the global grace-period kthread are awakened if needed.
620          */
621         WARN_ON_ONCE(my_rdp->nocb_gp_rdp != my_rdp);
622         /*
623          * An rcu_data structure is removed from the list after its
624          * CPU is de-offloaded and added to the list before that CPU is
625          * (re-)offloaded.  If the following loop happens to be referencing
626          * that rcu_data structure during the time that the corresponding
627          * CPU is de-offloaded and then immediately re-offloaded, this
628          * loop's rdp pointer will be carried to the end of the list by
629          * the resulting pair of list operations.  This can cause the loop
630          * to skip over some of the rcu_data structures that were supposed
631          * to have been scanned.  Fortunately a new iteration through the
632          * entire loop is forced after a given CPU's rcu_data structure
633          * is added to the list, so the skipped-over rcu_data structures
634          * won't be ignored for long.
635          */
636         list_for_each_entry(rdp, &my_rdp->nocb_head_rdp, nocb_entry_rdp) {
637                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Check"));
638                 rcu_nocb_lock_irqsave(rdp, flags);
639                 lockdep_assert_held(&rdp->nocb_lock);
640                 bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
641                 if (bypass_ncbs &&
642                     (time_after(j, READ_ONCE(rdp->nocb_bypass_first) + 1) ||
643                      bypass_ncbs > 2 * qhimark)) {
644                         // Bypass full or old, so flush it.
645                         (void)rcu_nocb_try_flush_bypass(rdp, j);
646                         bypass_ncbs = rcu_cblist_n_cbs(&rdp->nocb_bypass);
647                 } else if (!bypass_ncbs && rcu_segcblist_empty(&rdp->cblist)) {
648                         rcu_nocb_unlock_irqrestore(rdp, flags);
649                         continue; /* No callbacks here, try next. */
650                 }
651                 if (bypass_ncbs) {
652                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
653                                             TPS("Bypass"));
654                         bypass = true;
655                 }
656                 rnp = rdp->mynode;
657
658                 // Advance callbacks if helpful and low contention.
659                 needwake_gp = false;
660                 if (!rcu_segcblist_restempty(&rdp->cblist,
661                                              RCU_NEXT_READY_TAIL) ||
662                     (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq) &&
663                      rcu_seq_done(&rnp->gp_seq, cur_gp_seq))) {
664                         raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
665                         needwake_gp = rcu_advance_cbs(rnp, rdp);
666                         wasempty = rcu_segcblist_restempty(&rdp->cblist,
667                                                            RCU_NEXT_READY_TAIL);
668                         raw_spin_unlock_rcu_node(rnp); /* irqs disabled. */
669                 }
670                 // Need to wait on some grace period?
671                 WARN_ON_ONCE(wasempty &&
672                              !rcu_segcblist_restempty(&rdp->cblist,
673                                                       RCU_NEXT_READY_TAIL));
674                 if (rcu_segcblist_nextgp(&rdp->cblist, &cur_gp_seq)) {
675                         if (!needwait_gp ||
676                             ULONG_CMP_LT(cur_gp_seq, wait_gp_seq))
677                                 wait_gp_seq = cur_gp_seq;
678                         needwait_gp = true;
679                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu,
680                                             TPS("NeedWaitGP"));
681                 }
682                 if (rcu_segcblist_ready_cbs(&rdp->cblist)) {
683                         needwake = rdp->nocb_cb_sleep;
684                         WRITE_ONCE(rdp->nocb_cb_sleep, false);
685                         smp_mb(); /* CB invocation -after- GP end. */
686                 } else {
687                         needwake = false;
688                 }
689                 rcu_nocb_unlock_irqrestore(rdp, flags);
690                 if (needwake) {
691                         swake_up_one(&rdp->nocb_cb_wq);
692                         gotcbs = true;
693                 }
694                 if (needwake_gp)
695                         rcu_gp_kthread_wake();
696         }
697
698         my_rdp->nocb_gp_bypass = bypass;
699         my_rdp->nocb_gp_gp = needwait_gp;
700         my_rdp->nocb_gp_seq = needwait_gp ? wait_gp_seq : 0;
701
702         if (bypass && !rcu_nocb_poll) {
703                 // At least one child with non-empty ->nocb_bypass, so set
704                 // timer in order to avoid stranding its callbacks.
705                 wake_nocb_gp_defer(my_rdp, RCU_NOCB_WAKE_BYPASS,
706                                    TPS("WakeBypassIsDeferred"));
707         }
708         if (rcu_nocb_poll) {
709                 /* Polling, so trace if first poll in the series. */
710                 if (gotcbs)
711                         trace_rcu_nocb_wake(rcu_state.name, cpu, TPS("Poll"));
712                 if (list_empty(&my_rdp->nocb_head_rdp)) {
713                         raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
714                         if (!my_rdp->nocb_toggling_rdp)
715                                 WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
716                         raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
717                         /* Wait for any offloading rdp */
718                         nocb_gp_sleep(my_rdp, cpu);
719                 } else {
720                         schedule_timeout_idle(1);
721                 }
722         } else if (!needwait_gp) {
723                 /* Wait for callbacks to appear. */
724                 nocb_gp_sleep(my_rdp, cpu);
725         } else {
726                 rnp = my_rdp->mynode;
727                 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("StartWait"));
728                 swait_event_interruptible_exclusive(
729                         rnp->nocb_gp_wq[rcu_seq_ctr(wait_gp_seq) & 0x1],
730                         rcu_seq_done(&rnp->gp_seq, wait_gp_seq) ||
731                         !READ_ONCE(my_rdp->nocb_gp_sleep));
732                 trace_rcu_this_gp(rnp, my_rdp, wait_gp_seq, TPS("EndWait"));
733         }
734
735         if (!rcu_nocb_poll) {
736                 raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
737                 // (De-)queue an rdp to/from the group if its nocb state is changing
738                 rdp_toggling = my_rdp->nocb_toggling_rdp;
739                 if (rdp_toggling)
740                         my_rdp->nocb_toggling_rdp = NULL;
741
742                 if (my_rdp->nocb_defer_wakeup > RCU_NOCB_WAKE_NOT) {
743                         WRITE_ONCE(my_rdp->nocb_defer_wakeup, RCU_NOCB_WAKE_NOT);
744                         del_timer(&my_rdp->nocb_timer);
745                 }
746                 WRITE_ONCE(my_rdp->nocb_gp_sleep, true);
747                 raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
748         } else {
749                 rdp_toggling = READ_ONCE(my_rdp->nocb_toggling_rdp);
750                 if (rdp_toggling) {
751                         /*
752                          * Paranoid locking to make sure nocb_toggling_rdp is well
753                          * and truly reset *before* we (re)set SEGCBLIST_KTHREAD_GP,
754                          * or we could race with another round of nocb toggling for
755                          * this rdp.  Nocb locking should already prevent that, but
756                          * we stick to paranoia, especially on this rare path.
757                          */
758                         raw_spin_lock_irqsave(&my_rdp->nocb_gp_lock, flags);
759                         my_rdp->nocb_toggling_rdp = NULL;
760                         raw_spin_unlock_irqrestore(&my_rdp->nocb_gp_lock, flags);
761                 }
762         }
763
764         if (rdp_toggling) {
765                 bool wake_state = false;
766                 int ret;
767
768                 ret = nocb_gp_toggle_rdp(rdp_toggling, &wake_state);
769                 if (ret == 1)
770                         list_add_tail(&rdp_toggling->nocb_entry_rdp, &my_rdp->nocb_head_rdp);
771                 else if (ret == 0)
772                         list_del(&rdp_toggling->nocb_entry_rdp);
773                 if (wake_state)
774                         swake_up_one(&rdp_toggling->nocb_state_wq);
775         }
776
777         my_rdp->nocb_gp_seq = -1;
778         WARN_ON(signal_pending(current));
779 }
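/*
 * Putting the above together, each rcuog kthread essentially loops over:
 *
 *	for (;;) {
 *		for each rdp in my group:
 *			flush its ->nocb_bypass if stale or full;
 *			advance its callbacks and note the GP to wait for;
 *			wake its rcuo kthread if callbacks are ready;
 *		sleep until new callbacks arrive, the awaited GP ends,
 *		or (with rcu_nocb_poll) a short timeout elapses;
 *		handle any rdp that is being (de-)offloaded;
 *	}
 */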
780
781 /*
782  * No-CBs grace-period-wait kthread.  There is one of these per group
783  * of CPUs, but it is created only after at least one CPU in that group
784  * has come online at least once since boot.  This kthread checks for newly
785  * posted callbacks from any of the CPUs it is responsible for, waits for a
786  * grace period, then awakens all of the rcu_nocb_cb_kthread() instances
787  * that then have callback-invocation work to do.
788  */
789 static int rcu_nocb_gp_kthread(void *arg)
790 {
791         struct rcu_data *rdp = arg;
792
793         for (;;) {
794                 WRITE_ONCE(rdp->nocb_gp_loops, rdp->nocb_gp_loops + 1);
795                 nocb_gp_wait(rdp);
796                 cond_resched_tasks_rcu_qs();
797         }
798         return 0;
799 }
800
801 static inline bool nocb_cb_can_run(struct rcu_data *rdp)
802 {
803         u8 flags = SEGCBLIST_OFFLOADED | SEGCBLIST_KTHREAD_CB;
804
805         return rcu_segcblist_test_flags(&rdp->cblist, flags);
806 }
807
808 static inline bool nocb_cb_wait_cond(struct rcu_data *rdp)
809 {
810         return nocb_cb_can_run(rdp) && !READ_ONCE(rdp->nocb_cb_sleep);
811 }
812
813 /*
814  * Invoke any ready callbacks from the corresponding no-CBs CPU,
815  * then, if there are no more, wait for more to appear.
816  */
817 static void nocb_cb_wait(struct rcu_data *rdp)
818 {
819         struct rcu_segcblist *cblist = &rdp->cblist;
820         unsigned long cur_gp_seq;
821         unsigned long flags;
822         bool needwake_state = false;
823         bool needwake_gp = false;
824         bool can_sleep = true;
825         struct rcu_node *rnp = rdp->mynode;
826
827         do {
828                 swait_event_interruptible_exclusive(rdp->nocb_cb_wq,
829                                                     nocb_cb_wait_cond(rdp));
830
831                 // VVV Ensure CB invocation follows _sleep test.
832                 if (smp_load_acquire(&rdp->nocb_cb_sleep)) { // ^^^
833                         WARN_ON(signal_pending(current));
834                         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("WokeEmpty"));
835                 }
836         } while (!nocb_cb_can_run(rdp));
837
838
839         local_irq_save(flags);
840         rcu_momentary_dyntick_idle();
841         local_irq_restore(flags);
842         /*
843          * Disable BH to provide the expected environment.  Also, when
844          * transitioning to/from NOCB mode, a self-requeuing callback might
845          * be invoked from softirq.  A short grace period could cause both
846          * instances of this callback to execute concurrently.
847          */
848         local_bh_disable();
849         rcu_do_batch(rdp);
850         local_bh_enable();
851         lockdep_assert_irqs_enabled();
852         rcu_nocb_lock_irqsave(rdp, flags);
853         if (rcu_segcblist_nextgp(cblist, &cur_gp_seq) &&
854             rcu_seq_done(&rnp->gp_seq, cur_gp_seq) &&
855             raw_spin_trylock_rcu_node(rnp)) { /* irqs already disabled. */
856                 needwake_gp = rcu_advance_cbs(rdp->mynode, rdp);
857                 raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
858         }
859
860         if (rcu_segcblist_test_flags(cblist, SEGCBLIST_OFFLOADED)) {
861                 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB)) {
862                         rcu_segcblist_set_flags(cblist, SEGCBLIST_KTHREAD_CB);
863                         if (rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
864                                 needwake_state = true;
865                 }
866                 if (rcu_segcblist_ready_cbs(cblist))
867                         can_sleep = false;
868         } else {
869                 /*
870                  * De-offloading. Clear our flag and notify the de-offload worker.
871                  * We won't touch the callbacks and will keep sleeping until we
872                  * get re-offloaded.
873                  */
874                 WARN_ON_ONCE(!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB));
875                 rcu_segcblist_clear_flags(cblist, SEGCBLIST_KTHREAD_CB);
876                 if (!rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP))
877                         needwake_state = true;
878         }
879
880         WRITE_ONCE(rdp->nocb_cb_sleep, can_sleep);
881
882         if (rdp->nocb_cb_sleep)
883                 trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("CBSleep"));
884
885         rcu_nocb_unlock_irqrestore(rdp, flags);
886         if (needwake_gp)
887                 rcu_gp_kthread_wake();
888
889         if (needwake_state)
890                 swake_up_one(&rdp->nocb_state_wq);
891 }
892
893 /*
894  * Per-rcu_data kthread, but only for no-CBs CPUs.  Repeatedly invoke
895  * nocb_cb_wait() to do the dirty work.
896  */
897 static int rcu_nocb_cb_kthread(void *arg)
898 {
899         struct rcu_data *rdp = arg;
900
901         // Each pass through this loop does one callback batch, and,
902         // if there are no more ready callbacks, waits for them.
903         for (;;) {
904                 nocb_cb_wait(rdp);
905                 cond_resched_tasks_rcu_qs();
906         }
907         return 0;
908 }
909
910 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
911 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
912 {
913         return READ_ONCE(rdp->nocb_defer_wakeup) >= level;
914 }
915
916 /* Do a deferred wakeup of rcu_nocb_kthread(). */
917 static bool do_nocb_deferred_wakeup_common(struct rcu_data *rdp_gp,
918                                            struct rcu_data *rdp, int level,
919                                            unsigned long flags)
920         __releases(rdp_gp->nocb_gp_lock)
921 {
922         int ndw;
923         int ret;
924
925         if (!rcu_nocb_need_deferred_wakeup(rdp_gp, level)) {
926                 raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
927                 return false;
928         }
929
930         ndw = rdp_gp->nocb_defer_wakeup;
931         ret = __wake_nocb_gp(rdp_gp, rdp, ndw == RCU_NOCB_WAKE_FORCE, flags);
932         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("DeferredWake"));
933
934         return ret;
935 }
936
937 /* Do a deferred wakeup of rcu_nocb_kthread() from a timer handler. */
938 static void do_nocb_deferred_wakeup_timer(struct timer_list *t)
939 {
940         unsigned long flags;
941         struct rcu_data *rdp = from_timer(rdp, t, nocb_timer);
942
943         WARN_ON_ONCE(rdp->nocb_gp_rdp != rdp);
944         trace_rcu_nocb_wake(rcu_state.name, rdp->cpu, TPS("Timer"));
945
946         raw_spin_lock_irqsave(&rdp->nocb_gp_lock, flags);
947         smp_mb__after_spinlock(); /* Timer expire before wakeup. */
948         do_nocb_deferred_wakeup_common(rdp, rdp, RCU_NOCB_WAKE_BYPASS, flags);
949 }
950
951 /*
952  * Do a deferred wakeup of rcu_nocb_kthread() from fastpath.
953  * This means we do an inexact common-case check.  Note that if
954  * we miss, ->nocb_timer will eventually clean things up.
955  */
956 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
957 {
958         unsigned long flags;
959         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
960
961         if (!rdp_gp || !rcu_nocb_need_deferred_wakeup(rdp_gp, RCU_NOCB_WAKE))
962                 return false;
963
964         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
965         return do_nocb_deferred_wakeup_common(rdp_gp, rdp, RCU_NOCB_WAKE, flags);
966 }
967
968 void rcu_nocb_flush_deferred_wakeup(void)
969 {
970         do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
971 }
972 EXPORT_SYMBOL_GPL(rcu_nocb_flush_deferred_wakeup);
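/*
 * Callers on the path to idle (for example do_idle() in the scheduler)
 * use rcu_nocb_flush_deferred_wakeup() so that a wakeup deferred by an
 * earlier call_rcu() invoked with interrupts disabled is not left pending
 * while the CPU sleeps.
 */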
973
974 static int rdp_offload_toggle(struct rcu_data *rdp,
975                                bool offload, unsigned long flags)
976         __releases(rdp->nocb_lock)
977 {
978         struct rcu_segcblist *cblist = &rdp->cblist;
979         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
980         bool wake_gp = false;
981
982         rcu_segcblist_offload(cblist, offload);
983
984         if (rdp->nocb_cb_sleep)
985                 rdp->nocb_cb_sleep = false;
986         rcu_nocb_unlock_irqrestore(rdp, flags);
987
988         /*
989          * Ignore former value of nocb_cb_sleep and force wake up as it could
990          * have been spuriously set to false already.
991          */
992         swake_up_one(&rdp->nocb_cb_wq);
993
994         raw_spin_lock_irqsave(&rdp_gp->nocb_gp_lock, flags);
995         // Queue this rdp for add/del to/from the list to iterate on rcuog
996         WRITE_ONCE(rdp_gp->nocb_toggling_rdp, rdp);
997         if (rdp_gp->nocb_gp_sleep) {
998                 rdp_gp->nocb_gp_sleep = false;
999                 wake_gp = true;
1000         }
1001         raw_spin_unlock_irqrestore(&rdp_gp->nocb_gp_lock, flags);
1002
1003         return wake_gp;
1004 }
1005
1006 static long rcu_nocb_rdp_deoffload(void *arg)
1007 {
1008         struct rcu_data *rdp = arg;
1009         struct rcu_segcblist *cblist = &rdp->cblist;
1010         unsigned long flags;
1011         int wake_gp;
1012         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1013
1014         /*
1015          * rcu_nocb_rdp_deoffload() may be called directly if
1016          * rcuog/o[p] spawn failed, because at this time the rdp->cpu
1017          * is not online yet.
1018          */
1019         WARN_ON_ONCE((rdp->cpu != raw_smp_processor_id()) && cpu_online(rdp->cpu));
1020
1021         pr_info("De-offloading %d\n", rdp->cpu);
1022
1023         rcu_nocb_lock_irqsave(rdp, flags);
1024         /*
1025          * Flush once and for all now. This suffices because we are
1026          * running on the target CPU holding ->nocb_lock (thus having
1027          * interrupts disabled), and because rdp_offload_toggle()
1028          * invokes rcu_segcblist_offload(), which clears SEGCBLIST_OFFLOADED.
1029          * Thus future calls to rcu_segcblist_completely_offloaded() will
1030          * return false, which means that future calls to rcu_nocb_try_bypass()
1031          * will refuse to put anything into the bypass.
1032          */
1033         WARN_ON_ONCE(!rcu_nocb_flush_bypass(rdp, NULL, jiffies));
1034         /*
1035          * Start with invoking rcu_core() early. This way if the current thread
1036          * happens to preempt an ongoing call to rcu_core() in the middle,
1037          * leaving some work dismissed because rcu_core() still thinks the rdp is
1038          * completely offloaded, we are guaranteed a nearby future instance of
1039          * rcu_core() to catch up.
1040          */
1041         rcu_segcblist_set_flags(cblist, SEGCBLIST_RCU_CORE);
1042         invoke_rcu_core();
1043         wake_gp = rdp_offload_toggle(rdp, false, flags);
1044
1045         mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
1046         if (rdp_gp->nocb_gp_kthread) {
1047                 if (wake_gp)
1048                         wake_up_process(rdp_gp->nocb_gp_kthread);
1049
1050                 /*
1051                  * If rcuo[p] kthread spawn failed, directly remove SEGCBLIST_KTHREAD_CB.
1052          * Just wait for SEGCBLIST_KTHREAD_GP to be cleared by rcuog.
1053                  */
1054                 if (!rdp->nocb_cb_kthread) {
1055                         rcu_nocb_lock_irqsave(rdp, flags);
1056                         rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB);
1057                         rcu_nocb_unlock_irqrestore(rdp, flags);
1058                 }
1059
1060                 swait_event_exclusive(rdp->nocb_state_wq,
1061                                         !rcu_segcblist_test_flags(cblist,
1062                                           SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP));
1063         } else {
1064                 /*
1065                  * No kthread to clear the flags for us or remove the rdp from the nocb list
1066          * to iterate. Do it here instead. Locking doesn't look strictly necessary
1067                  * but we stick to paranoia in this rare path.
1068                  */
1069                 rcu_nocb_lock_irqsave(rdp, flags);
1070                 rcu_segcblist_clear_flags(&rdp->cblist,
1071                                 SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
1072                 rcu_nocb_unlock_irqrestore(rdp, flags);
1073
1074                 list_del(&rdp->nocb_entry_rdp);
1075         }
1076         mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
1077
1078         /*
1079          * Lock one last time to acquire latest callback updates from kthreads
1080          * so we can later handle callbacks locally without locking.
1081          */
1082         rcu_nocb_lock_irqsave(rdp, flags);
1083         /*
1084          * Theoretically we could clear SEGCBLIST_LOCKING after the nocb
1085          * lock is released but how about being paranoid for once?
1086          */
1087         rcu_segcblist_clear_flags(cblist, SEGCBLIST_LOCKING);
1088         /*
1089          * Without SEGCBLIST_LOCKING, we can't use
1090          * rcu_nocb_unlock_irqrestore() anymore.
1091          */
1092         raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
1093
1094         /* Sanity check */
1095         WARN_ON_ONCE(rcu_cblist_n_cbs(&rdp->nocb_bypass));
1096
1097
1098         return 0;
1099 }
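/*
 * A rough sketch of the de-offload handshake implemented above, assuming
 * both kthreads were successfully spawned earlier:
 *
 *	rcu_nocb_rdp_deoffload()       rcuo kthread        rcuog kthread
 *	------------------------       ------------        -------------
 *	clear SEGCBLIST_OFFLOADED
 *	wake rcuo and rcuog
 *	                               clear KTHREAD_CB
 *	                                                   clear KTHREAD_GP
 *	                                                   drop rdp from list
 *	wait for both flags cleared
 *	clear SEGCBLIST_LOCKING
 *
 * The exact interleaving of the two kthreads does not matter; the waiter
 * only cares that both SEGCBLIST_KTHREAD_CB and SEGCBLIST_KTHREAD_GP end
 * up cleared.
 */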
1100
1101 int rcu_nocb_cpu_deoffload(int cpu)
1102 {
1103         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1104         int ret = 0;
1105
1106         cpus_read_lock();
1107         mutex_lock(&rcu_state.barrier_mutex);
1108         if (rcu_rdp_is_offloaded(rdp)) {
1109                 if (cpu_online(cpu)) {
1110                         ret = work_on_cpu(cpu, rcu_nocb_rdp_deoffload, rdp);
1111                         if (!ret)
1112                                 cpumask_clear_cpu(cpu, rcu_nocb_mask);
1113                 } else {
1114                         pr_info("NOCB: Cannot CB-deoffload offline CPU %d\n", rdp->cpu);
1115                         ret = -EINVAL;
1116                 }
1117         }
1118         mutex_unlock(&rcu_state.barrier_mutex);
1119         cpus_read_unlock();
1120
1121         return ret;
1122 }
1123 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_deoffload);
1124
1125 static long rcu_nocb_rdp_offload(void *arg)
1126 {
1127         struct rcu_data *rdp = arg;
1128         struct rcu_segcblist *cblist = &rdp->cblist;
1129         unsigned long flags;
1130         int wake_gp;
1131         struct rcu_data *rdp_gp = rdp->nocb_gp_rdp;
1132
1133         WARN_ON_ONCE(rdp->cpu != raw_smp_processor_id());
1134         /*
1135          * For now we only support re-offload, i.e., the rdp must have been
1136          * offloaded on boot first.
1137          */
1138         if (!rdp->nocb_gp_rdp)
1139                 return -EINVAL;
1140
1141         if (WARN_ON_ONCE(!rdp_gp->nocb_gp_kthread))
1142                 return -EINVAL;
1143
1144         pr_info("Offloading %d\n", rdp->cpu);
1145
1146         /*
1147          * Can't use rcu_nocb_lock_irqsave() before SEGCBLIST_LOCKING
1148          * is set.
1149          */
1150         raw_spin_lock_irqsave(&rdp->nocb_lock, flags);
1151
1152         /*
1153          * We didn't take the nocb lock while working on the
1154          * rdp->cblist with SEGCBLIST_LOCKING cleared (pure softirq/rcuc mode).
1155          * Every modification that has been done previously on
1156          * rdp->cblist must be visible remotely to the nocb kthreads
1157          * upon wake up after reading the cblist flags.
1158          *
1159          * The layout against nocb_lock enforces that ordering:
1160          *
1161          *  __rcu_nocb_rdp_offload()   nocb_cb_wait()/nocb_gp_wait()
1162          * -------------------------   ----------------------------
1163          *      WRITE callbacks           rcu_nocb_lock()
1164          *      rcu_nocb_lock()           READ flags
1165          *      WRITE flags               READ callbacks
1166          *      rcu_nocb_unlock()         rcu_nocb_unlock()
1167          */
1168         wake_gp = rdp_offload_toggle(rdp, true, flags);
1169         if (wake_gp)
1170                 wake_up_process(rdp_gp->nocb_gp_kthread);
1171         swait_event_exclusive(rdp->nocb_state_wq,
1172                               rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_CB) &&
1173                               rcu_segcblist_test_flags(cblist, SEGCBLIST_KTHREAD_GP));
1174
1175         /*
1176          * All kthreads are ready to work, we can finally relieve rcu_core() and
1177          * enable nocb bypass.
1178          */
1179         rcu_nocb_lock_irqsave(rdp, flags);
1180         rcu_segcblist_clear_flags(cblist, SEGCBLIST_RCU_CORE);
1181         rcu_nocb_unlock_irqrestore(rdp, flags);
1182
1183         return 0;
1184 }
1185
1186 int rcu_nocb_cpu_offload(int cpu)
1187 {
1188         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1189         int ret = 0;
1190
1191         cpus_read_lock();
1192         mutex_lock(&rcu_state.barrier_mutex);
1193         if (!rcu_rdp_is_offloaded(rdp)) {
1194                 if (cpu_online(cpu)) {
1195                         ret = work_on_cpu(cpu, rcu_nocb_rdp_offload, rdp);
1196                         if (!ret)
1197                                 cpumask_set_cpu(cpu, rcu_nocb_mask);
1198                 } else {
1199                         pr_info("NOCB: Cannot CB-offload offline CPU %d\n", rdp->cpu);
1200                         ret = -EINVAL;
1201                 }
1202         }
1203         mutex_unlock(&rcu_state.barrier_mutex);
1204         cpus_read_unlock();
1205
1206         return ret;
1207 }
1208 EXPORT_SYMBOL_GPL(rcu_nocb_cpu_offload);
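/*
 * Roughly, rcu_nocb_cpu_offload() and rcu_nocb_cpu_deoffload() return 0 on
 * success (including when the CPU is already in the requested state) and
 * -EINVAL on failure, for example when the CPU is offline.  A (hypothetical)
 * caller toggling CPU 5 at runtime might do:
 *
 *	if (rcu_nocb_cpu_offload(5))
 *		pr_warn("Could not offload RCU callbacks for CPU 5\n");
 *	...
 *	if (rcu_nocb_cpu_deoffload(5))
 *		pr_warn("Could not de-offload RCU callbacks for CPU 5\n");
 */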
1209
1210 void __init rcu_init_nohz(void)
1211 {
1212         int cpu;
1213         bool need_rcu_nocb_mask = false;
1214         bool offload_all = false;
1215         struct rcu_data *rdp;
1216
1217 #if defined(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL)
1218         if (!rcu_state.nocb_is_setup) {
1219                 need_rcu_nocb_mask = true;
1220                 offload_all = true;
1221         }
1222 #endif /* #if defined(CONFIG_RCU_NOCB_CPU_DEFAULT_ALL) */
1223
1224 #if defined(CONFIG_NO_HZ_FULL)
1225         if (tick_nohz_full_running && !cpumask_empty(tick_nohz_full_mask)) {
1226                 need_rcu_nocb_mask = true;
1227                 offload_all = false; /* NO_HZ_FULL has its own mask. */
1228         }
1229 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
1230
1231         if (need_rcu_nocb_mask) {
1232                 if (!cpumask_available(rcu_nocb_mask)) {
1233                         if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
1234                                 pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
1235                                 return;
1236                         }
1237                 }
1238                 rcu_state.nocb_is_setup = true;
1239         }
1240
1241         if (!rcu_state.nocb_is_setup)
1242                 return;
1243
1244 #if defined(CONFIG_NO_HZ_FULL)
1245         if (tick_nohz_full_running)
1246                 cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
1247 #endif /* #if defined(CONFIG_NO_HZ_FULL) */
1248
1249         if (offload_all)
1250                 cpumask_setall(rcu_nocb_mask);
1251
1252         if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
1253                 pr_info("\tNote: kernel parameter 'rcu_nocbs=', 'nohz_full', or 'isolcpus=' contains nonexistent CPUs.\n");
1254                 cpumask_and(rcu_nocb_mask, cpu_possible_mask,
1255                             rcu_nocb_mask);
1256         }
1257         if (cpumask_empty(rcu_nocb_mask))
1258                 pr_info("\tOffload RCU callbacks from CPUs: (none).\n");
1259         else
1260                 pr_info("\tOffload RCU callbacks from CPUs: %*pbl.\n",
1261                         cpumask_pr_args(rcu_nocb_mask));
1262         if (rcu_nocb_poll)
1263                 pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
1264
1265         for_each_cpu(cpu, rcu_nocb_mask) {
1266                 rdp = per_cpu_ptr(&rcu_data, cpu);
1267                 if (rcu_segcblist_empty(&rdp->cblist))
1268                         rcu_segcblist_init(&rdp->cblist);
1269                 rcu_segcblist_offload(&rdp->cblist, true);
1270                 rcu_segcblist_set_flags(&rdp->cblist, SEGCBLIST_KTHREAD_CB | SEGCBLIST_KTHREAD_GP);
1271                 rcu_segcblist_clear_flags(&rdp->cblist, SEGCBLIST_RCU_CORE);
1272         }
1273         rcu_organize_nocb_kthreads();
1274 }
1275
1276 /* Initialize per-rcu_data variables for no-CBs CPUs. */
1277 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1278 {
1279         init_swait_queue_head(&rdp->nocb_cb_wq);
1280         init_swait_queue_head(&rdp->nocb_gp_wq);
1281         init_swait_queue_head(&rdp->nocb_state_wq);
1282         raw_spin_lock_init(&rdp->nocb_lock);
1283         raw_spin_lock_init(&rdp->nocb_bypass_lock);
1284         raw_spin_lock_init(&rdp->nocb_gp_lock);
1285         timer_setup(&rdp->nocb_timer, do_nocb_deferred_wakeup_timer, 0);
1286         rcu_cblist_init(&rdp->nocb_bypass);
1287         mutex_init(&rdp->nocb_gp_kthread_mutex);
1288 }
1289
1290 /*
1291  * If the specified CPU is a no-CBs CPU that does not already have its
1292  * rcuo CB kthread, spawn it.  Additionally, if the rcuo GP kthread
1293  * for this CPU's group has not yet been created, spawn it as well.
1294  */
1295 static void rcu_spawn_cpu_nocb_kthread(int cpu)
1296 {
1297         struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
1298         struct rcu_data *rdp_gp;
1299         struct task_struct *t;
1300         struct sched_param sp;
1301
1302         if (!rcu_scheduler_fully_active || !rcu_state.nocb_is_setup)
1303                 return;
1304
1305         /* If there already is an rcuo kthread, then nothing to do. */
1306         if (rdp->nocb_cb_kthread)
1307                 return;
1308
1309         /* Spawn this group's rcuog GP kthread first, if it does not yet exist. */
1310         sp.sched_priority = kthread_prio;
1311         rdp_gp = rdp->nocb_gp_rdp;
1312         mutex_lock(&rdp_gp->nocb_gp_kthread_mutex);
1313         if (!rdp_gp->nocb_gp_kthread) {
1314                 t = kthread_run(rcu_nocb_gp_kthread, rdp_gp,
1315                                 "rcuog/%d", rdp_gp->cpu);
1316                 if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo GP kthread, OOM is now expected behavior\n", __func__)) {
1317                         mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
1318                         goto end;
1319                 }
1320                 WRITE_ONCE(rdp_gp->nocb_gp_kthread, t);
1321                 if (kthread_prio)
1322                         sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1323         }
1324         mutex_unlock(&rdp_gp->nocb_gp_kthread_mutex);
1325
1326         /* Spawn the kthread for this CPU. */
1327         t = kthread_run(rcu_nocb_cb_kthread, rdp,
1328                         "rcuo%c/%d", rcu_state.abbr, cpu);
1329         if (WARN_ONCE(IS_ERR(t), "%s: Could not start rcuo CB kthread, OOM is now expected behavior\n", __func__))
1330                 goto end;
1331
1332         if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_CB_BOOST) && kthread_prio)
1333                 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1334
1335         WRITE_ONCE(rdp->nocb_cb_kthread, t);
1336         WRITE_ONCE(rdp->nocb_gp_kthread, rdp_gp->nocb_gp_kthread);
1337         return;
1338 end:
1339         mutex_lock(&rcu_state.barrier_mutex);
1340         if (rcu_rdp_is_offloaded(rdp)) {
1341                 rcu_nocb_rdp_deoffload(rdp);
1342                 cpumask_clear_cpu(cpu, rcu_nocb_mask);
1343         }
1344         mutex_unlock(&rcu_state.barrier_mutex);
1345 }
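/*
 * Naming follows the kthread_run() format strings above: the group's GP
 * kthread appears as "rcuog/<leader CPU>" and each CB kthread as
 * "rcuo<rcu_state.abbr>/<cpu>" (for example "rcuop/3" or "rcuos/3",
 * depending on the flavor abbreviation).
 */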
1346
1347 /* How many CB CPU IDs per GP kthread?  The default of -1 selects roughly sqrt(nr_cpu_ids). */
1348 static int rcu_nocb_gp_stride = -1;
1349 module_param(rcu_nocb_gp_stride, int, 0444);
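/*
 * Example (assuming the usual "rcutree." module-parameter prefix): booting
 * with "rcutree.rcu_nocb_gp_stride=8" groups eight CB CPUs under each rcuog
 * GP kthread instead of the sqrt(nr_cpu_ids) default.
 */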
1350
1351 /*
1352  * Initialize GP-CB relationships for all no-CBs CPUs.
1353  */
1354 static void __init rcu_organize_nocb_kthreads(void)
1355 {
1356         int cpu;
1357         bool firsttime = true;
1358         bool gotnocbs = false;
1359         bool gotnocbscbs = true;
1360         int ls = rcu_nocb_gp_stride;
1361         int nl = 0;  /* Next GP kthread. */
1362         struct rcu_data *rdp;
1363         struct rcu_data *rdp_gp = NULL;  /* Suppress misguided gcc warn. */
1364
1365         if (!cpumask_available(rcu_nocb_mask))
1366                 return;
1367         if (ls == -1) {
1368                 ls = nr_cpu_ids / int_sqrt(nr_cpu_ids);
1369                 rcu_nocb_gp_stride = ls;
1370         }
1371
1372         /*
1373          * Each pass through this loop sets up one rcu_data structure.
1374          * Should the corresponding CPU come online in the future, we
1375          * will spawn the needed rcuo CB and GP kthreads.
1376          */
1377         for_each_possible_cpu(cpu) {
1378                 rdp = per_cpu_ptr(&rcu_data, cpu);
1379                 if (rdp->cpu >= nl) {
1380                         /* New GP kthread, set up for CBs & next GP. */
1381                         gotnocbs = true;
1382                         nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
1383                         rdp_gp = rdp;
1384                         INIT_LIST_HEAD(&rdp->nocb_head_rdp);
1385                         if (dump_tree) {
1386                                 if (!firsttime)
1387                                         pr_cont("%s\n", gotnocbscbs
1388                                                         ? "" : " (self only)");
1389                                 gotnocbscbs = false;
1390                                 firsttime = false;
1391                                 pr_alert("%s: No-CB GP kthread CPU %d:",
1392                                          __func__, cpu);
1393                         }
1394                 } else {
1395                         /* Another CB kthread, link to previous GP kthread. */
1396                         gotnocbscbs = true;
1397                         if (dump_tree)
1398                                 pr_cont(" %d", cpu);
1399                 }
1400                 rdp->nocb_gp_rdp = rdp_gp;
1401                 if (cpumask_test_cpu(cpu, rcu_nocb_mask))
1402                         list_add_tail(&rdp->nocb_entry_rdp, &rdp_gp->nocb_head_rdp);
1403         }
1404         if (gotnocbs && dump_tree)
1405                 pr_cont("%s\n", gotnocbscbs ? "" : " (self only)");
1406 }
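/*
 * Worked example of the grouping above: with nr_cpu_ids == 16 and the
 * default stride, ls = 16 / int_sqrt(16) = 4, so CPU 0 becomes the GP
 * leader for CPUs 0-3 (nl advances to DIV_ROUND_UP(1, 4) * 4 = 4),
 * CPU 4 leads CPUs 4-7, and so on.  Only CPUs actually set in
 * rcu_nocb_mask are linked onto their leader's ->nocb_head_rdp list.
 */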
1407
1408 /*
1409  * Bind the current task to the offloaded CPUs.  If there are no offloaded
1410  * CPUs, leave the task unbound.  Splat if the bind attempt fails.
1411  */
1412 void rcu_bind_current_to_nocb(void)
1413 {
1414         if (cpumask_available(rcu_nocb_mask) && !cpumask_empty(rcu_nocb_mask))
1415                 WARN_ON(sched_setaffinity(current->pid, rcu_nocb_mask));
1416 }
1417 EXPORT_SYMBOL_GPL(rcu_bind_current_to_nocb);
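/*
 * Usage sketch (illustrative): a kthread that wants to run alongside the
 * offloaded CPUs may call rcu_bind_current_to_nocb() once the nocb mask is
 * final; a failing sched_setaffinity() only triggers the WARN_ON() above
 * rather than being treated as fatal.
 */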
1418
1419 // The ->on_cpu field is available only in CONFIG_SMP=y, so...
1420 #ifdef CONFIG_SMP
1421 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
1422 {
1423         return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
1424 }
1425 #else // #ifdef CONFIG_SMP
1426 static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
1427 {
1428         return "";
1429 }
1430 #endif // #else #ifdef CONFIG_SMP
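/*
 * The "!" returned above flags a kthread that is nominally runnable
 * (task_is_running()) yet not currently on any CPU; the state dumps below
 * append it after the kthread's CPU number.
 */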
1431
1432 /*
1433  * Dump out nocb grace-period kthread state for the specified rcu_data
1434  * structure.
1435  */
1436 static void show_rcu_nocb_gp_state(struct rcu_data *rdp)
1437 {
1438         struct rcu_node *rnp = rdp->mynode;
1439
1440         pr_info("nocb GP %d %c%c%c%c%c %c[%c%c] %c%c:%ld rnp %d:%d %lu %c CPU %d%s\n",
1441                 rdp->cpu,
1442                 "kK"[!!rdp->nocb_gp_kthread],
1443                 "lL"[raw_spin_is_locked(&rdp->nocb_gp_lock)],
1444                 "dD"[!!rdp->nocb_defer_wakeup],
1445                 "tT"[timer_pending(&rdp->nocb_timer)],
1446                 "sS"[!!rdp->nocb_gp_sleep],
1447                 ".W"[swait_active(&rdp->nocb_gp_wq)],
1448                 ".W"[swait_active(&rnp->nocb_gp_wq[0])],
1449                 ".W"[swait_active(&rnp->nocb_gp_wq[1])],
1450                 ".B"[!!rdp->nocb_gp_bypass],
1451                 ".G"[!!rdp->nocb_gp_gp],
1452                 (long)rdp->nocb_gp_seq,
1453                 rnp->grplo, rnp->grphi, READ_ONCE(rdp->nocb_gp_loops),
1454                 rdp->nocb_gp_kthread ? task_state_to_char(rdp->nocb_gp_kthread) : '.',
1455                 rdp->nocb_gp_kthread ? (int)task_cpu(rdp->nocb_gp_kthread) : -1,
1456                 show_rcu_should_be_on_cpu(rdp->nocb_gp_kthread));
1457 }
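/*
 * Decoding note for the dumps above and below: expressions such as
 * "kK"[!!rdp->nocb_gp_kthread] index a two-character string, so the
 * second (uppercase) character is printed when the condition is true and
 * the first ('k', 'l', '.', ...) when it is false.
 */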
1458
1459 /* Dump out nocb kthread state for the specified rcu_data structure. */
1460 static void show_rcu_nocb_state(struct rcu_data *rdp)
1461 {
1462         char bufw[20];
1463         char bufr[20];
1464         struct rcu_data *nocb_next_rdp;
1465         struct rcu_segcblist *rsclp = &rdp->cblist;
1466         bool waslocked;
1467         bool wassleep;
1468
1469         if (rdp->nocb_gp_rdp == rdp)
1470                 show_rcu_nocb_gp_state(rdp);
1471
1472         nocb_next_rdp = list_next_or_null_rcu(&rdp->nocb_gp_rdp->nocb_head_rdp,
1473                                               &rdp->nocb_entry_rdp,
1474                                               typeof(*rdp),
1475                                               nocb_entry_rdp);
1476
1477         sprintf(bufw, "%ld", rsclp->gp_seq[RCU_WAIT_TAIL]);
1478         sprintf(bufr, "%ld", rsclp->gp_seq[RCU_NEXT_READY_TAIL]);
1479         pr_info("   CB %d^%d->%d %c%c%c%c%c%c F%ld L%ld C%d %c%c%s%c%s%c%c q%ld %c CPU %d%s\n",
1480                 rdp->cpu, rdp->nocb_gp_rdp->cpu,
1481                 nocb_next_rdp ? nocb_next_rdp->cpu : -1,
1482                 "kK"[!!rdp->nocb_cb_kthread],
1483                 "bB"[raw_spin_is_locked(&rdp->nocb_bypass_lock)],
1484                 "cC"[!!atomic_read(&rdp->nocb_lock_contended)],
1485                 "lL"[raw_spin_is_locked(&rdp->nocb_lock)],
1486                 "sS"[!!rdp->nocb_cb_sleep],
1487                 ".W"[swait_active(&rdp->nocb_cb_wq)],
1488                 jiffies - rdp->nocb_bypass_first,
1489                 jiffies - rdp->nocb_nobypass_last,
1490                 rdp->nocb_nobypass_count,
1491                 ".D"[rcu_segcblist_ready_cbs(rsclp)],
1492                 ".W"[!rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL)],
1493                 rcu_segcblist_segempty(rsclp, RCU_WAIT_TAIL) ? "" : bufw,
1494                 ".R"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL)],
1495                 rcu_segcblist_segempty(rsclp, RCU_NEXT_READY_TAIL) ? "" : bufr,
1496                 ".N"[!rcu_segcblist_segempty(rsclp, RCU_NEXT_TAIL)],
1497                 ".B"[!!rcu_cblist_n_cbs(&rdp->nocb_bypass)],
1498                 rcu_segcblist_n_cbs(&rdp->cblist),
1499                 rdp->nocb_cb_kthread ? task_state_to_char(rdp->nocb_cb_kthread) : '.',
1500                 rdp->nocb_cb_kthread ? (int)task_cpu(rdp->nocb_cb_kthread) : -1,
1501                 show_rcu_should_be_on_cpu(rdp->nocb_cb_kthread));
1502
1503         /* It is OK for GP kthreads to have GP state. */
1504         if (rdp->nocb_gp_rdp == rdp)
1505                 return;
1506
1507         waslocked = raw_spin_is_locked(&rdp->nocb_gp_lock);
1508         wassleep = swait_active(&rdp->nocb_gp_wq);
1509         if (!rdp->nocb_gp_sleep && !waslocked && !wassleep)
1510                 return;  /* Nothing untoward. */
1511
1512         pr_info("   nocb GP activity on CB-only CPU!!! %c%c%c %c\n",
1513                 "lL"[waslocked],
1514                 "dD"[!!rdp->nocb_defer_wakeup],
1515                 "sS"[!!rdp->nocb_gp_sleep],
1516                 ".W"[wassleep]);
1517 }
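/*
 * The final complaint above fires only for an rdp that is not its group's
 * GP leader: a held ->nocb_gp_lock, a set ->nocb_gp_sleep, or waiters on
 * ->nocb_gp_wq indicate GP-side activity that should exist only on the
 * leader.
 */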
1518
1519 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
1520
1521 static inline int rcu_lockdep_is_held_nocb(struct rcu_data *rdp)
1522 {
1523         return 0;
1524 }
1525
1526 static inline bool rcu_current_is_nocb_kthread(struct rcu_data *rdp)
1527 {
1528         return false;
1529 }
1530
1531 /* No ->nocb_lock to acquire.  */
1532 static void rcu_nocb_lock(struct rcu_data *rdp)
1533 {
1534 }
1535
1536 /* No ->nocb_lock to release.  */
1537 static void rcu_nocb_unlock(struct rcu_data *rdp)
1538 {
1539 }
1540
1541 /* No ->nocb_lock to release.  */
1542 static void rcu_nocb_unlock_irqrestore(struct rcu_data *rdp,
1543                                        unsigned long flags)
1544 {
1545         local_irq_restore(flags);
1546 }
1547
1548 /* Lockdep check that ->cblist may be safely accessed. */
1549 static void rcu_lockdep_assert_cblist_protected(struct rcu_data *rdp)
1550 {
1551         lockdep_assert_irqs_disabled();
1552 }
1553
1554 static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
1555 {
1556 }
1557
1558 static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp)
1559 {
1560         return NULL;
1561 }
1562
1563 static void rcu_init_one_nocb(struct rcu_node *rnp)
1564 {
1565 }
1566
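/*
 * Without CONFIG_RCU_NOCB_CPU there is no bypass list, so the stubs below
 * behave as if the bypass were already flushed (returning true) and never
 * divert new callbacks into a bypass (returning false), leaving callers on
 * the normal ->cblist path.
 */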
1567 static bool rcu_nocb_flush_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1568                                   unsigned long j)
1569 {
1570         return true;
1571 }
1572
1573 static bool rcu_nocb_try_bypass(struct rcu_data *rdp, struct rcu_head *rhp,
1574                                 bool *was_alldone, unsigned long flags)
1575 {
1576         return false;
1577 }
1578
1579 static void __call_rcu_nocb_wake(struct rcu_data *rdp, bool was_empty,
1580                                  unsigned long flags)
1581 {
1582         WARN_ON_ONCE(1);  /* Should be dead code! */
1583 }
1584
1585 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
1586 {
1587 }
1588
1589 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp, int level)
1590 {
1591         return false;
1592 }
1593
1594 static bool do_nocb_deferred_wakeup(struct rcu_data *rdp)
1595 {
1596         return false;
1597 }
1598
1599 static void rcu_spawn_cpu_nocb_kthread(int cpu)
1600 {
1601 }
1602
1603 static void show_rcu_nocb_state(struct rcu_data *rdp)
1604 {
1605 }
1606
1607 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */