kernel/rcu/tree_exp.h (platform/kernel/linux-starfive.git)
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
        rcu_seq_start(&rcu_state.expedited_sequence);
        rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
        return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
        rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);
        rcu_seq_end(&rcu_state.expedited_sequence);
        smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
        unsigned long s;

        smp_mb(); /* Caller's modifications seen first by other CPUs. */
        s = rcu_seq_snap(&rcu_state.expedited_sequence);
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
        return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
        return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
        bool done;
        unsigned long flags;
        unsigned long mask;
        unsigned long oldmask;
        int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
        struct rcu_node *rnp;
        struct rcu_node *rnp_up;

        /* If no new CPUs onlined since last time, nothing to do. */
        if (likely(ncpus == rcu_state.ncpus_snap))
                return;
        rcu_state.ncpus_snap = ncpus;

        /*
         * Each pass through the following loop propagates newly onlined
         * CPUs for the current rcu_node structure up the rcu_node tree.
         */
        rcu_for_each_leaf_node(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }

                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

                /* If it was already nonzero, nothing to propagate. */
                if (oldmask)
                        continue;

                /* Propagate the new CPU up the tree. */
                mask = rnp->grpmask;
                rnp_up = rnp->parent;
                done = false;
                while (rnp_up) {
                        raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
                        raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
                        rnp_up = rnp_up->parent;
                }
        }
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
        unsigned long flags;
        struct rcu_node *rnp;

        sync_exp_reset_tree_hotplug();
        rcu_for_each_node_breadth_first(rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        return READ_ONCE(rnp->exp_tasks) == NULL &&
               READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
        unsigned long flags;
        bool ret;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        ret = sync_rcu_exp_done(rnp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        return ret;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
        __releases(rnp->lock)
{
        unsigned long mask;

        raw_lockdep_assert_held_rcu_node(rnp);
        for (;;) {
                if (!sync_rcu_exp_done(rnp)) {
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
                                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                swake_up_one_online(&rcu_state.expedited_wq);
                        }
                        break;
                }
                mask = rnp->grpmask;
                raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
                WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
        }
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
        unsigned long flags;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        __rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
{
        int cpu;
        unsigned long flags;
        struct rcu_data *rdp;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
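        /*
         * Stop forcing the scheduler tick on any nohz_full CPUs whose
         * expedited quiescent states are being reported here.
         */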
        for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
                rdp = per_cpu_ptr(&rcu_data, cpu);
                if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
                        continue;
                rdp->rcu_forced_tick_exp = false;
                tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
        }
        __rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
        rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
        if (rcu_exp_gp_seq_done(s)) {
                trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
                smp_mb(); /* Ensure test happens before caller kfree(). */
                return true;
        }
        return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        struct rcu_node *rnp = rdp->mynode;
        struct rcu_node *rnp_root = rcu_get_root();

        /* Low-contention fastpath. */
        if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
            (rnp == rnp_root ||
             ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
            mutex_trylock(&rcu_state.exp_mutex))
                goto fastpath;

        /*
         * Each pass through the following loop works its way up
         * the rcu_node tree, returning if others have done the work or
         * otherwise falling through to acquire ->exp_mutex.  The mapping
         * from CPU to rcu_node structure can be inexact, as it is just
         * promoting locality and is not strictly needed for correctness.
         */
        for (; rnp != NULL; rnp = rnp->parent) {
                if (sync_exp_work_done(s))
                        return true;

                /* Work not done, either wait here or go up. */
                spin_lock(&rnp->exp_lock);
                if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

                        /* Someone else doing GP, so wait for them. */
                        spin_unlock(&rnp->exp_lock);
                        trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                                  rnp->grplo, rnp->grphi,
                                                  TPS("wait"));
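                        /*
                         * Each rcu_node has four exp_wq[] wait queues, indexed
                         * by the low-order bits of the desired sequence number,
                         * so waiters on distinct recent grace periods use
                         * distinct queues.
                         */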
                        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                                   sync_exp_work_done(s));
                        return true;
                }
                WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
                spin_unlock(&rnp->exp_lock);
                trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
                                          rnp->grplo, rnp->grphi, TPS("nxtlvl"));
        }
        mutex_lock(&rcu_state.exp_mutex);
fastpath:
        if (sync_exp_work_done(s)) {
                mutex_unlock(&rcu_state.exp_mutex);
                return true;
        }
        rcu_exp_gp_seq_start();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
        return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void __sync_rcu_exp_select_node_cpus(struct rcu_exp_work *rewp)
{
        int cpu;
        unsigned long flags;
        unsigned long mask_ofl_test;
        unsigned long mask_ofl_ipi;
        int ret;
        struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

        raw_spin_lock_irqsave_rcu_node(rnp, flags);

        /* Each pass checks a CPU for identity, offline, and idle. */
        mask_ofl_test = 0;
        for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                unsigned long mask = rdp->grpmask;
                int snap;

                if (raw_smp_processor_id() == cpu ||
                    !(rnp->qsmaskinitnext & mask)) {
                        mask_ofl_test |= mask;
                } else {
                        snap = rcu_dynticks_snap(cpu);
                        if (rcu_dynticks_in_eqs(snap))
                                mask_ofl_test |= mask;
                        else
                                rdp->exp_dynticks_snap = snap;
                }
        }
        mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

        /*
         * Need to wait for any blocked tasks as well.  Note that
         * additional blocking tasks will also block the expedited GP
         * until such time as the ->expmask bits are cleared.
         */
        if (rcu_preempt_has_tasks(rnp))
                WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

        /* IPI the remaining CPUs for expedited quiescent state. */
        for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
                struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
                unsigned long mask = rdp->grpmask;

retry_ipi:
                if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
                        mask_ofl_test |= mask;
                        continue;
                }
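                /*
                 * If execution has migrated onto the target CPU since the
                 * first scan, no self-IPI is needed: just mark this CPU for
                 * direct quiescent-state reporting below.
                 */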
                if (get_cpu() == cpu) {
                        mask_ofl_test |= mask;
                        put_cpu();
                        continue;
                }
                ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
                put_cpu();
                /* The CPU will report the QS in response to the IPI. */
                if (!ret)
                        continue;

                /* Failed, raced with CPU hotplug operation. */
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if ((rnp->qsmaskinitnext & mask) &&
                    (rnp->expmask & mask)) {
                        /* Online, so delay for a bit and try again. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
                        schedule_timeout_idle(1);
                        goto retry_ipi;
                }
                /* CPU really is offline, so we must report its QS. */
                if (rnp->expmask & mask)
                        mask_ofl_test |= mask;
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
        /* Report quiescent states for those that went offline. */
        if (mask_ofl_test)
                rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

static void rcu_exp_sel_wait_wake(unsigned long s);

#ifdef CONFIG_RCU_EXP_KTHREAD
static void sync_rcu_exp_select_node_cpus(struct kthread_work *wp)
{
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);

        __sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
        return !!READ_ONCE(rcu_exp_par_gp_kworker);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
        kthread_init_work(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
        /*
         * Use rcu_exp_par_gp_kworker, because flushing a work item from
         * another work item on the same kthread worker can result in
         * deadlock.
         */
        kthread_queue_work(rcu_exp_par_gp_kworker, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
        kthread_flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct kthread_work *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
        kthread_init_work(&rew->rew_work, wait_rcu_exp_gp);
        kthread_queue_work(rcu_exp_gp_kworker, &rew->rew_work);
}

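/*
 * Nothing to clean up in the kthread case: kthread_init_work() sets up no
 * on-stack debug-object state, unlike the INIT_WORK_ONSTACK() used in the
 * workqueue variant below, which must be paired with destroy_work_on_stack().
 */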
static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
}
#else /* !CONFIG_RCU_EXP_KTHREAD */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
        struct rcu_exp_work *rewp =
                container_of(wp, struct rcu_exp_work, rew_work);

        __sync_rcu_exp_select_node_cpus(rewp);
}

static inline bool rcu_gp_par_worker_started(void)
{
        return !!READ_ONCE(rcu_par_gp_wq);
}

static inline void sync_rcu_exp_select_cpus_queue_work(struct rcu_node *rnp)
{
        int cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);

        INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
        /* If all offline, queue the work on an unbound CPU. */
        if (unlikely(cpu > rnp->grphi - rnp->grplo))
                cpu = WORK_CPU_UNBOUND;
        else
                cpu += rnp->grplo;
        queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
}

static inline void sync_rcu_exp_select_cpus_flush_work(struct rcu_node *rnp)
{
        flush_work(&rnp->rew.rew_work);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
        struct rcu_exp_work *rewp;

        rewp = container_of(wp, struct rcu_exp_work, rew_work);
        rcu_exp_sel_wait_wake(rewp->rew_s);
}

static inline void synchronize_rcu_expedited_queue_work(struct rcu_exp_work *rew)
{
        INIT_WORK_ONSTACK(&rew->rew_work, wait_rcu_exp_gp);
        queue_work(rcu_gp_wq, &rew->rew_work);
}

static inline void synchronize_rcu_expedited_destroy_work(struct rcu_exp_work *rew)
{
        destroy_work_on_stack(&rew->rew_work);
}
#endif /* CONFIG_RCU_EXP_KTHREAD */

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
        struct rcu_node *rnp;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
        sync_exp_reset_tree();
        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

        /* Schedule work for each leaf rcu_node structure. */
        rcu_for_each_leaf_node(rnp) {
                rnp->exp_need_flush = false;
                if (!READ_ONCE(rnp->expmask))
                        continue; /* Avoid early boot non-existent wq. */
                if (!rcu_gp_par_worker_started() ||
                    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
                    rcu_is_last_leaf_node(rnp)) {
                        /* No worker started yet or last leaf, do direct call. */
                        sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
                        continue;
                }
                sync_rcu_exp_select_cpus_queue_work(rnp);
                rnp->exp_need_flush = true;
        }

        /* Wait for jobs (if any) to complete. */
        rcu_for_each_leaf_node(rnp)
                if (rnp->exp_need_flush)
                        sync_rcu_exp_select_cpus_flush_work(rnp);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
        int t;
        struct rcu_node *rnp_root = rcu_get_root();

        t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
                                          sync_rcu_exp_done_unlocked(rnp_root),
                                          tlimit);
        // Workqueues should not be signaled.
        if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
                return true;
        WARN_ON(t < 0);  /* workqueues should not be signaled. */
        return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
        int cpu;
        unsigned long j;
        unsigned long jiffies_stall;
        unsigned long jiffies_start;
        unsigned long mask;
        int ndetected;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        struct rcu_node *rnp_root = rcu_get_root();
        unsigned long flags;

        trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
        jiffies_stall = rcu_exp_jiffies_till_stall_check();
        jiffies_start = jiffies;
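        /*
         * On nohz_full systems, re-enable the scheduler tick on holdout CPUs
         * to help them reach an expedited quiescent state, then allow a bit
         * of extra time before starting stall checks.
         */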
        if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
                if (synchronize_rcu_expedited_wait_once(1))
                        return;
                rcu_for_each_leaf_node(rnp) {
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        mask = READ_ONCE(rnp->expmask);
                        for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                if (rdp->rcu_forced_tick_exp)
                                        continue;
                                rdp->rcu_forced_tick_exp = true;
                                if (cpu_online(cpu))
                                        tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
                        }
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                j = READ_ONCE(jiffies_till_first_fqs);
                if (synchronize_rcu_expedited_wait_once(j + HZ))
                        return;
        }

        for (;;) {
                if (synchronize_rcu_expedited_wait_once(jiffies_stall))
                        return;
                if (rcu_stall_is_suppressed())
                        continue;
                trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
                pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
                       rcu_state.name);
                ndetected = 0;
                rcu_for_each_leaf_node(rnp) {
                        ndetected += rcu_print_task_exp_stall(rnp);
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                struct rcu_data *rdp;

                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                ndetected++;
                                rdp = per_cpu_ptr(&rcu_data, cpu);
                                pr_cont(" %d-%c%c%c%c", cpu,
                                        "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)],
                                        "D."[!!data_race(rdp->cpu_no_qs.b.exp)]);
                        }
                }
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rcu_state.expedited_sequence,
                        data_race(rnp_root->expmask),
                        ".T"[!!data_race(rnp_root->exp_tasks)]);
                if (ndetected) {
                        pr_err("blocking rcu_node structures (internal RCU debug):");
                        rcu_for_each_node_breadth_first(rnp) {
                                if (rnp == rnp_root)
                                        continue; /* printed unconditionally */
                                if (sync_rcu_exp_done_unlocked(rnp))
                                        continue;
                                pr_cont(" l=%u:%d-%d:%#lx/%c",
                                        rnp->level, rnp->grplo, rnp->grphi,
                                        data_race(rnp->expmask),
                                        ".T"[!!data_race(rnp->exp_tasks)]);
                        }
                        pr_cont("\n");
                }
                rcu_for_each_leaf_node(rnp) {
                        for_each_leaf_node_possible_cpu(rnp, cpu) {
                                mask = leaf_node_cpu_bit(rnp, cpu);
                                if (!(READ_ONCE(rnp->expmask) & mask))
                                        continue;
                                preempt_disable(); // For smp_processor_id() in dump_cpu_task().
                                dump_cpu_task(cpu);
                                preempt_enable();
                        }
                        rcu_exp_print_detail_task_stall_rnp(rnp);
                }
                jiffies_stall = 3 * rcu_exp_jiffies_till_stall_check() + 3;
                panic_on_rcu_stall();
        }
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
        struct rcu_node *rnp;

        synchronize_rcu_expedited_wait();

        // Switch over to wakeup mode, allowing the next GP to proceed.
        // End the previous grace period only after acquiring the mutex
        // to ensure that only one GP runs concurrently with wakeups.
        mutex_lock(&rcu_state.exp_wake_mutex);
        rcu_exp_gp_seq_end();
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

        rcu_for_each_node_breadth_first(rnp) {
                if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
                        spin_lock(&rnp->exp_lock);
                        /* Recheck, avoid hang in case someone just arrived. */
                        if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
                                WRITE_ONCE(rnp->exp_seq_rq, s);
                        spin_unlock(&rnp->exp_lock);
                }
                smp_mb(); /* All above changes before wakeup. */
                wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
        }
        trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
        mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus();

        /* Wait and clean up, including waking everyone. */
        rcu_exp_wait_wake(s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
        int depth = rcu_preempt_depth();
        unsigned long flags;
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        struct task_struct *t = current;

        /*
         * First, the common case of not being in an RCU read-side
         * critical section.  If preemption and softirqs are also enabled,
         * or if this IPI arrived from the idle loop, immediately report
         * the quiescent state; otherwise defer it.
         */
        if (!depth) {
                if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
                    rcu_is_cpu_rrupt_from_idle()) {
                        rcu_report_exp_rdp(rdp);
                } else {
                        WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
                        set_tsk_need_resched(t);
                        set_preempt_need_resched();
                }
                return;
        }

        /*
         * Second, the less-common case of being in an RCU read-side
         * critical section.  In this case we can count on a future
         * rcu_read_unlock().  However, this rcu_read_unlock() might
         * execute on some other CPU, but in that case there will be
         * a future context switch.  Either way, if the expedited
         * grace period is still waiting on this CPU, set ->deferred_qs
         * so that the eventual quiescent state will be reported.
         * Note that there is a large group of race conditions that
         * can have caused this quiescent state to already have been
         * reported, so we really do need to check ->expmask.
         */
        if (depth > 0) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmask & rdp->grpmask) {
                        WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
                        t->rcu_read_unlock_special.b.exp_hint = true;
                }
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }

        // Finally, negative nesting depth should not happen.
        WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        unsigned long flags;
        int ndetected = 0;
        struct task_struct *t;

        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rnp->exp_tasks) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;
        }
        t = list_entry(rnp->exp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
                ndetected++;
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        return ndetected;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, dumping the stack of each that is blocking the current
 * expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
        unsigned long flags;
        struct task_struct *t;

        if (!rcu_exp_stall_task_details)
                return;
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!READ_ONCE(rnp->exp_tasks)) {
                raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        t = list_entry(rnp->exp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                /*
                 * We could be printing a lot while holding a spinlock.
                 * Avoid triggering hard lockup.
                 */
                touch_nmi_watchdog();
                sched_show_task(t);
        }
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
        __this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
        /* Store .exp before .rcu_urgent_qs. */
        smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
        set_tsk_need_resched(current);
        set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
        struct rcu_node *rnp = rdp->mynode;
        bool preempt_bh_enabled = !(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK));

        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
                return;
        if (rcu_is_cpu_rrupt_from_idle() ||
            (IS_ENABLED(CONFIG_PREEMPT_COUNT) && preempt_bh_enabled)) {
                rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
                return;
        }
        rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
        unsigned long flags;
        int my_cpu;
        struct rcu_data *rdp;
        int ret;
        struct rcu_node *rnp;

        rdp = per_cpu_ptr(&rcu_data, cpu);
        rnp = rdp->mynode;
        my_cpu = get_cpu();
        /* Quiescent state either not needed or already requested, leave. */
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            READ_ONCE(rdp->cpu_no_qs.b.exp)) {
                put_cpu();
                return;
        }
        /* Quiescent state needed on current CPU, so set it up locally. */
        if (my_cpu == cpu) {
                local_irq_save(flags);
                rcu_exp_need_qs();
                local_irq_restore(flags);
                put_cpu();
                return;
        }
        /* Quiescent state needed on some other CPU, send IPI. */
        ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
        put_cpu();
        WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, we never have to print out
 * tasks blocked within RCU read-side critical sections that are blocking
 * the current expedited grace period.
 */
static void rcu_exp_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
        bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
        unsigned long flags;
        struct rcu_exp_work rew;
        struct rcu_node *rnp;
        unsigned long s;

        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

        /* Is the state such that the call is a grace period? */
        if (rcu_blocking_is_gp()) {
                // Note well that this code runs with !PREEMPT && !SMP.
                // In addition, all code that advances grace periods runs
                // at process level.  Therefore, this expedited GP overlaps
                // with other expedited GPs only by being fully nested within
                // them, which allows reuse of ->gp_seq_polled_exp_snap.
                rcu_poll_gp_seq_start_unlocked(&rcu_state.gp_seq_polled_exp_snap);
                rcu_poll_gp_seq_end_unlocked(&rcu_state.gp_seq_polled_exp_snap);

                local_irq_save(flags);
                WARN_ON_ONCE(num_online_cpus() > 1);
                rcu_state.expedited_sequence += (1 << RCU_SEQ_CTR_SHIFT);
                local_irq_restore(flags);
                return;  // Context allows vacuous grace periods.
        }

        /* If expedited grace periods are prohibited, fall back to normal. */
        if (rcu_gp_is_normal()) {
                wait_rcu_gp(call_rcu_hurry);
                return;
        }

        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap();
        if (exp_funnel_lock(s))
                return;  /* Someone else did our work for us. */

        /* Ensure that load happens before action based on it. */
        if (unlikely(boottime)) {
                /* Direct call during scheduler init and early_initcalls(). */
                rcu_exp_sel_wait_wake(s);
        } else {
                /* Marshall arguments & schedule the expedited grace period. */
                rew.rew_s = s;
                synchronize_rcu_expedited_queue_work(&rew);
        }

        /* Wait for expedited grace period to complete. */
        rnp = rcu_get_root();
        wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
                   sync_exp_work_done(s));
        smp_mb(); /* Work actions happen before return. */

        /* Let the next expedited grace period start. */
        mutex_unlock(&rcu_state.exp_mutex);

        if (likely(!boottime))
                synchronize_rcu_expedited_destroy_work(&rew);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Ensure that start_poll_synchronize_rcu_expedited() has the expedited
 * RCU grace periods that it needs.
 */
static void sync_rcu_do_polled_gp(struct work_struct *wp)
{
        unsigned long flags;
        int i = 0;
        struct rcu_node *rnp = container_of(wp, struct rcu_node, exp_poll_wq);
        unsigned long s;

        raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
        s = rnp->exp_seq_poll_rq;
        rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
        raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
        if (s == RCU_GET_STATE_COMPLETED)
                return;
        while (!poll_state_synchronize_rcu(s)) {
                synchronize_rcu_expedited();
                if (i == 10 || i == 20)
                        pr_info("%s: i = %d s = %lx gp_seq_polled = %lx\n", __func__, i, s, READ_ONCE(rcu_state.gp_seq_polled));
                i++;
        }
        raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
        s = rnp->exp_seq_poll_rq;
        if (poll_state_synchronize_rcu(s))
                rnp->exp_seq_poll_rq = RCU_GET_STATE_COMPLETED;
        raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);
}

/**
 * start_poll_synchronize_rcu_expedited - Snapshot current RCU state and start expedited grace period
 *
 * Returns a cookie to pass to a call to cond_synchronize_rcu(),
 * cond_synchronize_rcu_expedited(), or poll_state_synchronize_rcu(),
 * allowing them to determine whether or not any sort of grace period has
 * elapsed in the meantime.  If the needed expedited grace period is not
 * already slated to start, initiates that grace period.
 */
unsigned long start_poll_synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_data *rdp;
        struct rcu_node *rnp;
        unsigned long s;

        s = get_state_synchronize_rcu();
        rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
        rnp = rdp->mynode;
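        /*
         * Before rcu_init(), the ->exp_poll_lock and the rcu_gp_wq workqueue
         * are not yet usable, so just return the cookie without queueing the
         * deferred expedited grace period.
         */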
        if (rcu_init_invoked())
                raw_spin_lock_irqsave(&rnp->exp_poll_lock, flags);
        if (!poll_state_synchronize_rcu(s)) {
                if (rcu_init_invoked()) {
                        rnp->exp_seq_poll_rq = s;
                        queue_work(rcu_gp_wq, &rnp->exp_poll_wq);
                }
        }
        if (rcu_init_invoked())
                raw_spin_unlock_irqrestore(&rnp->exp_poll_lock, flags);

        return s;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited);

/**
 * start_poll_synchronize_rcu_expedited_full - Take a full snapshot and start expedited grace period
 * @rgosp: Place to put snapshot of grace-period state
 *
 * Places the normal and expedited grace-period states in rgosp.  This
 * state value can be passed to a later call to cond_synchronize_rcu_full()
 * or poll_state_synchronize_rcu_full() to determine whether or not a
 * grace period (whether normal or expedited) has elapsed in the meantime.
 * If the needed expedited grace period is not already slated to start,
 * initiates that grace period.
 */
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
        get_state_synchronize_rcu_full(rgosp);
        (void)start_poll_synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu_expedited_full);

/**
 * cond_synchronize_rcu_expedited - Conditionally wait for an expedited RCU grace period
 *
 * @oldstate: value from get_state_synchronize_rcu(), start_poll_synchronize_rcu(), or start_poll_synchronize_rcu_expedited()
 *
 * If any type of full RCU grace period has elapsed since the earlier
 * call to get_state_synchronize_rcu(), start_poll_synchronize_rcu(),
 * or start_poll_synchronize_rcu_expedited(), just return.  Otherwise,
 * invoke synchronize_rcu_expedited() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @oldstate and that returned at the end
 * of this function.
 */
void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
        if (!poll_state_synchronize_rcu(oldstate))
                synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited);

/**
 * cond_synchronize_rcu_expedited_full - Conditionally wait for an expedited RCU grace period
 * @rgosp: value from get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(), or start_poll_synchronize_rcu_expedited_full()
 *
 * If a full RCU grace period has elapsed since the call to
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or start_poll_synchronize_rcu_expedited_full() from which @rgosp was
 * obtained, just return.  Otherwise, invoke synchronize_rcu_expedited()
 * to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account.
 * But counter wrap is harmless.  If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for a couple of additional grace periods should be just fine.
 *
 * This function provides the same memory-ordering guarantees that
 * would be provided by a synchronize_rcu() that was invoked at the call
 * to the function that provided @rgosp and that returned at the end of
 * this function.
 */
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
        if (!poll_state_synchronize_rcu_full(rgosp))
                synchronize_rcu_expedited();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu_expedited_full);