/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Task-based RCU implementations.
 *
 * Copyright (C) 2020 Paul E. McKenney
 */

#ifdef CONFIG_TASKS_RCU_GENERIC

////////////////////////////////////////////////////////////////////////
//
// Generic data structures.

struct rcu_tasks;
typedef void (*rcu_tasks_gp_func_t)(struct rcu_tasks *rtp);
typedef void (*pregp_func_t)(void);
typedef void (*pertask_func_t)(struct task_struct *t, struct list_head *hop);
typedef void (*postscan_func_t)(struct list_head *hop);
typedef void (*holdouts_func_t)(struct list_head *hop, bool ndrpt, bool *frptp);
typedef void (*postgp_func_t)(struct rcu_tasks *rtp);

/**
 * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism.
 * @cbs_head: Head of callback list.
 * @cbs_tail: Tail pointer for callback list.
 * @cbs_wq: Wait queue allowing new callbacks to get kthread's attention.
 * @cbs_lock: Lock protecting callback list.
 * @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
 * @gp_func: This flavor's grace-period-wait function.
 * @gp_state: Grace period's most recent state transition (debugging).
 * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
 * @init_fract: Initial backoff sleep interval.
 * @gp_jiffies: Time of last @gp_state transition.
 * @gp_start: Most recent grace-period start in jiffies.
 * @n_gps: Number of grace periods completed since boot.
 * @n_ipis: Number of IPIs sent to encourage grace periods to end.
 * @n_ipis_fails: Number of IPI-send failures.
 * @pregp_func: This flavor's pre-grace-period function (optional).
 * @pertask_func: This flavor's per-task scan function (optional).
 * @postscan_func: This flavor's post-task scan function (optional).
 * @holdouts_func: This flavor's holdout-list scan function (optional).
 * @postgp_func: This flavor's post-grace-period function (optional).
 * @call_func: This flavor's call_rcu()-equivalent function.
 * @name: This flavor's textual name.
 * @kname: This flavor's kthread name.
 */
struct rcu_tasks {
	struct rcu_head *cbs_head;
	struct rcu_head **cbs_tail;
	struct wait_queue_head cbs_wq;
	raw_spinlock_t cbs_lock;
	int gp_state;
	int gp_sleep;
	int init_fract;
	unsigned long gp_jiffies;
	unsigned long gp_start;
	unsigned long n_gps;
	unsigned long n_ipis;
	unsigned long n_ipis_fails;
	struct task_struct *kthread_ptr;
	rcu_tasks_gp_func_t gp_func;
	pregp_func_t pregp_func;
	pertask_func_t pertask_func;
	postscan_func_t postscan_func;
	holdouts_func_t holdouts_func;
	postgp_func_t postgp_func;
	call_rcu_func_t call_func;
	char *name;
	char *kname;
};

#define DEFINE_RCU_TASKS(rt_name, gp, call, n)				\
static struct rcu_tasks rt_name =					\
{									\
	.cbs_tail = &rt_name.cbs_head,					\
	.cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq),	\
	.cbs_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_lock),	\
	.gp_func = gp,							\
	.call_func = call,						\
	.name = n,							\
	.kname = #rt_name,						\
}

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);

/* Avoid IPIing CPUs early in the grace period. */
#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);

/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
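
/*
 * Configuration sketch (illustrative, not part of this file): because this
 * header is included from kernel/rcu/update.c, these module parameters
 * appear under the "rcupdate." prefix.  Assuming a typical built-in
 * configuration, they can be set on the kernel command line:
 *
 *	rcupdate.rcu_task_ipi_delay=500 rcupdate.rcu_task_stall_timeout=0
 *
 * or adjusted at runtime through sysfs:
 *
 *	echo 0 > /sys/module/rcupdate/parameters/rcu_task_stall_timeout
 */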

/* RCU tasks grace-period state for debugging. */
#define RTGS_INIT		 0
#define RTGS_WAIT_WAIT_CBS	 1
#define RTGS_WAIT_GP		 2
#define RTGS_PRE_WAIT_GP	 3
#define RTGS_SCAN_TASKLIST	 4
#define RTGS_POST_SCAN_TASKLIST	 5
#define RTGS_WAIT_SCAN_HOLDOUTS	 6
#define RTGS_SCAN_HOLDOUTS	 7
#define RTGS_POST_GP		 8
#define RTGS_WAIT_READERS	 9
#define RTGS_INVOKE_CBS		10
#define RTGS_WAIT_CBS		11
#ifndef CONFIG_TINY_RCU
static const char * const rcu_tasks_gp_state_names[] = {
	"RTGS_INIT",
	"RTGS_WAIT_WAIT_CBS",
	"RTGS_WAIT_GP",
	"RTGS_PRE_WAIT_GP",
	"RTGS_SCAN_TASKLIST",
	"RTGS_POST_SCAN_TASKLIST",
	"RTGS_WAIT_SCAN_HOLDOUTS",
	"RTGS_SCAN_HOLDOUTS",
	"RTGS_POST_GP",
	"RTGS_WAIT_READERS",
	"RTGS_INVOKE_CBS",
	"RTGS_WAIT_CBS",
};
#endif /* #ifndef CONFIG_TINY_RCU */

////////////////////////////////////////////////////////////////////////
//
// Generic code.

/* Record grace-period phase and time. */
static void set_tasks_gp_state(struct rcu_tasks *rtp, int newstate)
{
	rtp->gp_state = newstate;
	rtp->gp_jiffies = jiffies;
}

#ifndef CONFIG_TINY_RCU
/* Return state name. */
static const char *tasks_gp_state_getname(struct rcu_tasks *rtp)
{
	int i = data_race(rtp->gp_state); // Let KCSAN detect update races
	int j = READ_ONCE(i); // Prevent the compiler from reading twice

	if (j >= ARRAY_SIZE(rcu_tasks_gp_state_names))
		return "???";
	return rcu_tasks_gp_state_names[j];
}
#endif /* #ifndef CONFIG_TINY_RCU */

// Enqueue a callback for the specified flavor of Tasks RCU.
static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
				   struct rcu_tasks *rtp)
{
	unsigned long flags;
	bool needwake;

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
	needwake = !rtp->cbs_head;
	WRITE_ONCE(*rtp->cbs_tail, rhp);
	rtp->cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rtp->kthread_ptr))
		wake_up(&rtp->cbs_wq);
}
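
/*
 * Queue-layout sketch (illustrative): ->cbs_tail always references the
 * ->next field of the most recently enqueued callback, or ->cbs_head
 * itself when the list is empty, so enqueueing is O(1) and the kthread
 * can snapshot and reset the whole list under one lock acquisition:
 *
 *	cbs_head -> rh1 -> rh2 -> NULL
 *	cbs_tail --------------^	(references rh2->next)
 */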

// Wait for a grace period for the specified flavor of Tasks RCU.
static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp)
{
	/* Complain if the scheduler has not started. */
	WARN_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
		  "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(rtp->call_func);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct rcu_head *list;
	struct rcu_head *next;
	struct rcu_tasks *rtp = arg;

	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	WRITE_ONCE(rtp->kthread_ptr, current); // Let GPs start!

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
	 */
	for (;;) {
		set_tasks_gp_state(rtp, RTGS_WAIT_CBS);

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rtp->cbs_lock, flags);
		smp_mb__after_spinlock(); // Order updates vs. GP.
		list = rtp->cbs_head;
		rtp->cbs_head = NULL;
		rtp->cbs_tail = &rtp->cbs_head;
		raw_spin_unlock_irqrestore(&rtp->cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rtp->cbs_wq,
						 READ_ONCE(rtp->cbs_head));
			if (!rtp->cbs_head) {
				WARN_ON(signal_pending(current));
				set_tasks_gp_state(rtp, RTGS_WAIT_WAIT_CBS);
				schedule_timeout_idle(HZ/10);
			}
			continue;
		}

		// Wait for one grace period.
		set_tasks_gp_state(rtp, RTGS_WAIT_GP);
		rtp->gp_start = jiffies;
		rtp->gp_func(rtp);
		rtp->n_gps++;

		/* Invoke the callbacks. */
		set_tasks_gp_state(rtp, RTGS_INVOKE_CBS);
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		/* Paranoid sleep to keep this from entering a tight loop. */
		schedule_timeout_idle(rtp->gp_sleep);
	}
}

/* Spawn RCU-tasks grace-period kthread. */
static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
{
	struct task_struct *t;

	t = kthread_run(rcu_tasks_kthread, rtp, "%s_kthread", rtp->kname);
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start %s grace-period kthread, OOM is now expected behavior\n", __func__, rtp->name))
		return;
	smp_mb(); /* Ensure others see full kthread. */
}

#ifndef CONFIG_TINY_RCU

/*
 * Print any non-default Tasks RCU settings.
 */
static void __init rcu_tasks_bootup_oddness(void)
{
#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */
#ifdef CONFIG_TASKS_RCU
	pr_info("\tTrampoline variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_TASKS_RUDE_RCU
	pr_info("\tRude variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */
#ifdef CONFIG_TASKS_TRACE_RCU
	pr_info("\tTracing variant of Tasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifndef CONFIG_TINY_RCU
/* Dump out rcutorture-relevant state common to all RCU-tasks flavors. */
static void show_rcu_tasks_generic_gp_kthread(struct rcu_tasks *rtp, char *s)
{
	pr_info("%s: %s(%d) since %lu g:%lu i:%lu/%lu %c%c %s\n",
		rtp->kname,
		tasks_gp_state_getname(rtp), data_race(rtp->gp_state),
		jiffies - data_race(rtp->gp_jiffies),
		data_race(rtp->n_gps),
		data_race(rtp->n_ipis_fails), data_race(rtp->n_ipis),
		".k"[!!data_race(rtp->kthread_ptr)],
		".C"[!!data_race(rtp->cbs_head)],
		s);
}
#endif // #ifndef CONFIG_TINY_RCU

static void exit_tasks_rcu_finish_trace(struct task_struct *t);

#if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU)

////////////////////////////////////////////////////////////////////////
//
// Shared code between task-list-scanning variants of Tasks RCU.

/* Wait for one RCU-tasks grace period. */
static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
{
	struct task_struct *g, *t;
	unsigned long lastreport;
	LIST_HEAD(holdouts);
	int fract;

	set_tasks_gp_state(rtp, RTGS_PRE_WAIT_GP);
	rtp->pregp_func();

	/*
	 * There were callbacks, so we need to wait for an RCU-tasks
	 * grace period.  Start off by scanning the task list for tasks
	 * that are not already voluntarily blocked.  Mark these tasks
	 * and make a list of them in holdouts.
	 */
	set_tasks_gp_state(rtp, RTGS_SCAN_TASKLIST);
	rcu_read_lock();
	for_each_process_thread(g, t)
		rtp->pertask_func(t, &holdouts);
	rcu_read_unlock();

	set_tasks_gp_state(rtp, RTGS_POST_SCAN_TASKLIST);
	rtp->postscan_func(&holdouts);

	/*
	 * Each pass through the following loop scans the list of holdout
	 * tasks, removing any that are no longer holdouts.  When the list
	 * is empty, we are done.
	 */
	lastreport = jiffies;

	// Start off with initial wait and slowly back off to 1 HZ wait.
	fract = rtp->init_fract;

	while (!list_empty(&holdouts)) {
		bool firstreport;
		bool needreport;
		int rtst;

		/* Slowly back off waiting for holdouts */
		set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS);
		schedule_timeout_idle(fract);

		if (fract < HZ)
			fract++;

		rtst = READ_ONCE(rcu_task_stall_timeout);
		needreport = rtst > 0 && time_after(jiffies, lastreport + rtst);
		if (needreport)
			lastreport = jiffies;
		firstreport = true;
		WARN_ON(signal_pending(current));
		set_tasks_gp_state(rtp, RTGS_SCAN_HOLDOUTS);
		rtp->holdouts_func(&holdouts, needreport, &firstreport);
	}

	set_tasks_gp_state(rtp, RTGS_POST_GP);
	rtp->postgp_func(rtp);
}

#endif /* #if defined(CONFIG_TASKS_RCU) || defined(CONFIG_TASKS_TRACE_RCU) */

#ifdef CONFIG_TASKS_RCU

////////////////////////////////////////////////////////////////////////
//
// Simple variant of RCU whose quiescent states are voluntary context
// switch, cond_resched_rcu_qs(), user-space execution, and idle.
// As such, grace periods can take one good long time.  There are no
// read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
// because this implementation is intended to get the system into a safe
// state for some of the manipulations involved in tracing and the like.
// Finally, this implementation does not support high call_rcu_tasks()
// rates from multiple CPUs.  If this is required, per-CPU callback lists
// will be needed.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_pregp_step():
//	Invokes synchronize_rcu() in order to wait for all in-flight
//	t->on_rq and t->nvcsw transitions to complete.  This works because
//	all such transitions are carried out with interrupts disabled.
// rcu_tasks_pertask(), invoked on every non-idle task:
//	For every runnable non-idle task other than the current one, use
//	get_task_struct() to pin down that task, snapshot that task's
//	number of voluntary context switches, and add that task to the
//	holdout list.
// rcu_tasks_postscan():
//	Invoke synchronize_srcu() to ensure that all tasks that were
//	in the process of exiting (and which thus might not know to
//	synchronize with this RCU Tasks grace period) have completed
//	exiting.
// check_all_holdout_tasks(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_postgp():
//	Invokes synchronize_rcu() in order to ensure that all prior
//	t->on_rq and t->nvcsw transitions are seen by all CPUs and tasks
//	to have happened before the end of this RCU Tasks grace period.
//	Again, this works because all such transitions are carried out
//	with interrupts disabled.
//
// For each exiting task, the exit_tasks_rcu_start() and
// exit_tasks_rcu_finish() functions begin and end, respectively, the SRCU
// read-side critical sections waited for by rcu_tasks_postscan().
//
// Pre-grace-period update-side code is ordered before the grace period
// via the ->cbs_lock and the smp_mb__after_spinlock().  Pre-grace-period
// read-side code is ordered before the grace period via the
// synchronize_rcu() call in rcu_tasks_pregp_step() and by the scheduler's
// locks and interrupt handlers.

/* Pre-grace-period preparation. */
static void rcu_tasks_pregp_step(void)
{
	/*
	 * Wait for all pre-existing t->on_rq and t->nvcsw transitions
	 * to complete.  Invoking synchronize_rcu() suffices because all
	 * these transitions occur with interrupts disabled.  Without this
	 * synchronize_rcu(), a read-side critical section that started
	 * before the grace period might be incorrectly seen as having
	 * started after the grace period.
	 *
	 * This synchronize_rcu() also dispenses with the need for a
	 * memory barrier on the first store to t->rcu_tasks_holdout,
	 * as it forces the store to happen after the beginning of the
	 * grace period.
	 */
	synchronize_rcu();
}

/* Per-task initial processing. */
static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
{
	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
		get_task_struct(t);
		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
		WRITE_ONCE(t->rcu_tasks_holdout, true);
		list_add(&t->rcu_tasks_holdout_list, hop);
	}
}

/* Processing between scanning the task list and draining the holdout list. */
static void rcu_tasks_postscan(struct list_head *hop)
{
	/*
	 * Wait for tasks that are in the process of exiting.  This
	 * does only part of the job, ensuring that all tasks that were
	 * previously exiting reach the point where they have disabled
	 * preemption, allowing the later synchronize_rcu() to finish
	 * the job.
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	rcu_request_urgent_qs_task(t);
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* Scan the holdout list for tasks no longer holding out. */
static void check_all_holdout_tasks(struct list_head *hop,
				    bool needreport, bool *firstreport)
{
	struct task_struct *t, *t1;

	list_for_each_entry_safe(t, t1, hop, rcu_tasks_holdout_list) {
		check_holdout_task(t, needreport, firstreport);
		cond_resched();
	}
}

/* Finish off the Tasks-RCU grace period. */
static void rcu_tasks_postgp(struct rcu_tasks *rtp)
{
	/*
	 * Because ->on_rq and ->nvcsw are not guaranteed to have full
	 * memory barriers prior to them in the schedule() path, memory
	 * reordering on other CPUs could cause their RCU-tasks read-side
	 * critical sections to extend past the end of the grace period.
	 * However, because these ->nvcsw updates are carried out with
	 * interrupts disabled, we can use synchronize_rcu() to force the
	 * needed ordering on all such CPUs.
	 *
	 * This synchronize_rcu() also confines all ->rcu_tasks_holdout
	 * accesses to be within the grace period, avoiding the need for
	 * memory barriers for ->rcu_tasks_holdout accesses.
	 *
	 * In addition, this synchronize_rcu() waits for exiting tasks
	 * to complete their final preempt_disable() region of execution,
	 * cleaning up after the synchronize_srcu() above.
	 */
	synchronize_rcu();
}

void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks, rcu_tasks_wait_gp, call_rcu_tasks, "RCU Tasks");

/**
 * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
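
/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * embeds an rcu_head in its own structure and frees that structure from
 * the callback once all tasks have passed through a quiescent state.
 * All names below are hypothetical.
 *
 *	struct my_tramp {
 *		void *insns;	// e.g., instructions patched into a trampoline
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_tramp_free_cb(struct rcu_head *rhp)
 *	{
 *		struct my_tramp *tp = container_of(rhp, struct my_tramp, rh);
 *
 *		vfree(tp->insns);
 *		kfree(tp);
 *	}
 *
 *	// After unhooking the trampoline so no new tasks can enter it:
 *	call_rcu_tasks(&tp->rh, my_tramp_free_cb);
 */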

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
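
/*
 * Usage sketch (illustrative): the synchronous form is the common choice
 * when tearing down tracing infrastructure.  Assuming a hypothetical
 * unregister_my_trampoline() that prevents new entries:
 *
 *	unregister_my_trampoline(tp);	// No new tasks can enter.
 *	synchronize_rcu_tasks();	// Wait out tasks already inside.
 *	free_my_trampoline(tp);		// Now safe to free.
 */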

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

static int __init rcu_spawn_tasks_kthread(void)
{
	rcu_tasks.gp_sleep = HZ / 10;
	rcu_tasks.init_fract = HZ / 10;
	rcu_tasks.pregp_func = rcu_tasks_pregp_step;
	rcu_tasks.pertask_func = rcu_tasks_pertask;
	rcu_tasks.postscan_func = rcu_tasks_postscan;
	rcu_tasks.holdouts_func = check_all_holdout_tasks;
	rcu_tasks.postgp_func = rcu_tasks_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_classic_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_classic_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

/* Do the srcu_read_lock() for the above synchronize_srcu(). */
void exit_tasks_rcu_start(void) __acquires(&tasks_rcu_exit_srcu)
{
	preempt_disable();
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
	preempt_enable();
}

/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu)
{
	struct task_struct *t = current;

	preempt_disable();
	__srcu_read_unlock(&tasks_rcu_exit_srcu, t->rcu_tasks_idx);
	preempt_enable();
	exit_tasks_rcu_finish_trace(t);
}

#else /* #ifdef CONFIG_TASKS_RCU */
void exit_tasks_rcu_start(void) { }
void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); }
#endif /* #else #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_TASKS_RUDE_RCU

////////////////////////////////////////////////////////////////////////
//
// "Rude" variant of Tasks RCU, inspired by Steve Rostedt's trick of
// passing an empty function to schedule_on_each_cpu().  This approach
// provides an asynchronous call_rcu_tasks_rude() API and batching of
// concurrent calls to the synchronous synchronize_rcu_tasks_rude() API.
// This invokes schedule_on_each_cpu() in order to send IPIs far and wide
// and induces otherwise unnecessary context switches on all online CPUs,
// whether idle or not.
//
// Callback handling is provided by the rcu_tasks_kthread() function.
//
// Ordering is provided by the scheduler's context-switch code.

// Empty function to allow workqueues to force a context switch.
static void rcu_tasks_be_rude(struct work_struct *work)
{
}

// Wait for one rude RCU-tasks grace period.
static void rcu_tasks_rude_wait_gp(struct rcu_tasks *rtp)
{
	if (num_online_cpus() <= 1)
		return;	// Fastpath for only one CPU.

	rtp->n_ipis += cpumask_weight(cpu_online_mask);
	schedule_on_each_cpu(rcu_tasks_be_rude);
}

void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_rude, rcu_tasks_rude_wait_gp, call_rcu_tasks_rude,
		 "RCU Tasks Rude");

/**
 * call_rcu_tasks_rude() - Queue an RCU callback for invocation after a rude task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_rude()
 * assumes that the read-side critical sections end at context switch,
 * cond_resched_rcu_qs(), or transition to usermode execution.  As such,
 * there are no read-side primitives analogous to rcu_read_lock() and
 * rcu_read_unlock() because this primitive is intended to determine
 * that all tasks have passed through a safe state, not so much for
 * data-structure synchronization.
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_rude(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_rude);

/**
 * synchronize_rcu_tasks_rude - wait for a rude rcu-tasks grace period
 *
 * Control will return to the caller some time after a rude rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), userspace execution, and (in theory,
 * anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_rude() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_rude(void)
{
	synchronize_rcu_tasks_generic(&rcu_tasks_rude);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_rude);

/**
 * rcu_barrier_tasks_rude - Wait for in-flight call_rcu_tasks_rude() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_rude(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_rude();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);

static int __init rcu_spawn_tasks_rude_kthread(void)
{
	rcu_tasks_rude.gp_sleep = HZ / 10;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_rude_gp_kthread(void)
{
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_rude, "");
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_rude_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)
#endif /* #ifdef CONFIG_TASKS_RUDE_RCU */

////////////////////////////////////////////////////////////////////////
//
// Tracing variant of Tasks RCU.  This variant is designed to be used
// to protect tracing hooks, including those of BPF.  This variant
// therefore:
//
// 1.	Has explicit read-side markers to allow finite grace periods
//	in the face of in-kernel loops for PREEMPT=n builds.
//
// 2.	Protects code in the idle loop, exception entry/exit, and
//	CPU-hotplug code paths, similar to the capabilities of SRCU.
//
// 3.	Avoids expensive read-side instructions, having overhead similar
//	to that of Preemptible RCU.
//
// There are of course downsides.  The grace-period code can send IPIs to
// CPUs, even when those CPUs are in the idle loop or in nohz_full userspace.
// It is necessary to scan the full tasklist, much as for Tasks RCU.  There
// is a single callback queue guarded by a single lock, again, much as for
// Tasks RCU.  If needed, these downsides can be at least partially remedied.
//
// Perhaps most important, this variant of RCU does not affect the vanilla
// flavors, rcu_preempt and rcu_sched.  The fact that RCU Tasks Trace
// readers can operate from idle, offline, and exception entry/exit in no
// way allows rcu_preempt and rcu_sched readers to also do so.
//
// The implementation uses rcu_tasks_wait_gp(), which relies on function
// pointers in the rcu_tasks structure.  The rcu_spawn_tasks_trace_kthread()
// function sets these function pointers up so that rcu_tasks_wait_gp()
// invokes these functions in this order:
//
// rcu_tasks_trace_pregp_step():
//	Initialize the count of readers and block CPU-hotplug operations.
// rcu_tasks_trace_pertask(), invoked on every non-idle task:
//	Initialize per-task state and attempt to identify an immediate
//	quiescent state for that task, or, failing that, attempt to
//	set that task's .need_qs flag so that task's next outermost
//	rcu_read_unlock_trace() will report the quiescent state (in which
//	case the count of readers is incremented).  If both attempts fail,
//	the task is added to a "holdout" list.  Note that IPIs are used
//	to invoke trc_read_check_handler() in the context of running tasks
//	in order to avoid ordering overhead on common-case shared-variable
//	accesses.
// rcu_tasks_trace_postscan():
//	Initialize state and attempt to identify an immediate quiescent
//	state as above (but only for idle tasks), unblock CPU-hotplug
//	operations, and wait for an RCU grace period to avoid races with
//	tasks that are in the process of exiting.
// check_all_holdout_tasks_trace(), repeatedly until holdout list is empty:
//	Scans the holdout list, attempting to identify a quiescent state
//	for each task on the list.  If there is a quiescent state, the
//	corresponding task is removed from the holdout list.
// rcu_tasks_trace_postgp():
//	Wait for the count of readers to drop to zero, reporting any stalls.
//	Also execute full memory barriers to maintain ordering with code
//	executing after the grace period.
//
// The exit_tasks_rcu_finish_trace() synchronizes with exiting tasks.
//
// Pre-grace-period update-side code is ordered before the grace
// period via the ->cbs_lock and barriers in rcu_tasks_kthread().
// Pre-grace-period read-side code is ordered before the grace period by
// atomic_dec_and_test() of the count of readers (for IPIed readers) and by
// scheduler context-switch ordering (for locked-down non-running readers).
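
// Usage sketch (illustrative, not part of this file): readers bracket
// their accesses with the explicit markers, and a hypothetical updater
// waits for all such readers before freeing.  All names other than the
// rcu_read_lock_trace() family and rcu_assign_pointer() are assumptions.
//
//	// Reader, e.g., a tracing or BPF hook:
//	rcu_read_lock_trace();
//	p = READ_ONCE(my_hook);		// Fetch the protected pointer.
//	if (p)
//		p->func(p->arg);
//	rcu_read_unlock_trace();
//
//	// Updater:
//	old = my_hook;
//	rcu_assign_pointer(my_hook, new);
//	synchronize_rcu_tasks_trace();	// Wait for all trace readers.
//	kfree(old);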

// The lockdep state must be outside of #ifdef to be useful.
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_trace_key;
struct lockdep_map rcu_trace_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_trace", &rcu_lock_trace_key);
EXPORT_SYMBOL_GPL(rcu_trace_lock_map);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TASKS_TRACE_RCU

static atomic_t trc_n_readers_need_end;		// Number of waited-for readers.
static DECLARE_WAIT_QUEUE_HEAD(trc_wait);	// Where the GP kthread waits for readers.

// Record outstanding IPIs to each CPU.  No point in sending two...
static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);

// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
static unsigned long n_heavy_reader_attempts;
static unsigned long n_heavy_reader_updates;
static unsigned long n_heavy_reader_ofl_updates;

void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
		 "RCU Tasks Trace");

/*
 * This irq_work handler allows rcu_read_unlock_trace() to be invoked
 * while the scheduler locks are held.
 */
static void rcu_read_unlock_iw(struct irq_work *iwp)
{
	wake_up(&trc_wait);
}
static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);

/* If we are the last reader, wake up the grace-period kthread. */
void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
{
	int nq = READ_ONCE(t->trc_reader_special.b.need_qs);

	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
	    t->trc_reader_special.b.need_mb)
		smp_mb(); // Pairs with update-side barriers.
	// Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
	if (nq)
		WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_nesting, nesting);
	if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
		irq_work_queue(&rcu_tasks_trace_iw);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_trace_special);

/* Add a task to the holdout list, if it is not already on the list. */
static void trc_add_holdout(struct task_struct *t, struct list_head *bhp)
{
	if (list_empty(&t->trc_holdout_list)) {
		get_task_struct(t);
		list_add(&t->trc_holdout_list, bhp);
	}
}

/* Remove a task from the holdout list, if it is in fact present. */
static void trc_del_holdout(struct task_struct *t)
{
	if (!list_empty(&t->trc_holdout_list)) {
		list_del_init(&t->trc_holdout_list);
		put_task_struct(t);
	}
}

/* IPI handler to check task state. */
static void trc_read_check_handler(void *t_in)
{
	struct task_struct *t = current;
	struct task_struct *texp = t_in;

	// If the task is no longer running on this CPU, leave.
	if (unlikely(texp != t)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi; // Already on holdout list, so will check later.
	}

	// If the task is not in a read-side critical section, and
	// if this is the last reader, awaken the grace-period kthread.
	if (likely(!READ_ONCE(t->trc_reader_nesting))) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		// Mark as checked after decrement to avoid false
		// positives on the above WARN_ON_ONCE().
		WRITE_ONCE(t->trc_reader_checked, true);
		goto reset_ipi;
	}
	// If we are racing with an rcu_read_unlock_trace(), try again later.
	if (unlikely(READ_ONCE(t->trc_reader_nesting) < 0)) {
		if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
			wake_up(&trc_wait);
		goto reset_ipi;
	}
	WRITE_ONCE(t->trc_reader_checked, true);

	// Get here if the task is in a read-side critical section.  Set
	// its state so that it will awaken the grace-period kthread upon
	// exit from that critical section.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);

reset_ipi:
	// Allow future IPIs to be sent on CPU and for task.
	// Also order this IPI handler against any later manipulations of
	// the intended task.
	smp_store_release(per_cpu_ptr(&trc_ipi_to_cpu, smp_processor_id()), false); // ^^^
	smp_store_release(&texp->trc_ipi_to_cpu, -1); // ^^^
}

/* Callback function for scheduler to check locked-down task. */
static bool trc_inspect_reader(struct task_struct *t, void *arg)
{
	int cpu = task_cpu(t);
	bool in_qs = false;
	bool ofl = cpu_is_offline(cpu);

	if (task_curr(t)) {
		WARN_ON_ONCE(ofl && !is_idle_task(t));

		// If no chance of heavyweight readers, do it the hard way.
		if (!ofl && !IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB))
			return false;

		// If heavyweight readers are enabled on the remote task,
		// we can inspect its state despite it currently running.
		// However, we cannot safely change its state.
		n_heavy_reader_attempts++;
		if (!ofl && // Check for "running" idle tasks on offline CPUs.
		    !rcu_dynticks_zero_in_eqs(cpu, &t->trc_reader_nesting))
			return false; // No quiescent state, do it the hard way.
		n_heavy_reader_updates++;
		if (ofl)
			n_heavy_reader_ofl_updates++;
		in_qs = true;
	} else {
		// The task is not running, so C-language access is safe.
		in_qs = likely(!t->trc_reader_nesting);
	}

	// Mark as checked so that the grace-period kthread will
	// remove it from the holdout list.
	t->trc_reader_checked = true;

	if (in_qs)
		return true; // Already in quiescent state, done!!!

	// The task is in a read-side critical section, so set up its
	// state so that it will awaken the grace-period kthread upon exit
	// from that critical section.
	atomic_inc(&trc_n_readers_need_end); // One more to wait on.
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs));
	WRITE_ONCE(t->trc_reader_special.b.need_qs, true);
	return true;
}

/* Attempt to extract the state for the specified task. */
static void trc_wait_for_one_reader(struct task_struct *t,
				    struct list_head *bhp)
{
	int cpu;

	// If a previous IPI is still in flight, let it complete.
	if (smp_load_acquire(&t->trc_ipi_to_cpu) != -1) // Order IPI
		return;

	// The current task had better be in a quiescent state.
	if (t == current) {
		t->trc_reader_checked = true;
		WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
		return;
	}

	// Attempt to nail down the task for inspection.
	get_task_struct(t);
	if (try_invoke_on_locked_down_task(t, trc_inspect_reader, NULL)) {
		put_task_struct(t);
		return;
	}
	put_task_struct(t);

	// If this task is not yet on the holdout list, then we are in
	// an RCU read-side critical section.  Otherwise, the invocation of
	// rcu_add_holdout() that added it to the list did the necessary
	// get_task_struct().  Either way, the task cannot be freed out
	// from under this code.

	// If currently running, send an IPI, either way, add to list.
	trc_add_holdout(t, bhp);
	if (task_curr(t) &&
	    time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
		// The task is currently running, so try IPIing it.
		cpu = task_cpu(t);

		// If there is already an IPI outstanding, let it happen.
		if (per_cpu(trc_ipi_to_cpu, cpu) || t->trc_ipi_to_cpu >= 0)
			return;

		atomic_inc(&trc_n_readers_need_end);
		per_cpu(trc_ipi_to_cpu, cpu) = true;
		t->trc_ipi_to_cpu = cpu;
		rcu_tasks_trace.n_ipis++;
		if (smp_call_function_single(cpu,
					     trc_read_check_handler, t, 0)) {
			// Just in case there is some other reason for
			// failure than the target CPU being offline.
			rcu_tasks_trace.n_ipis_fails++;
			per_cpu(trc_ipi_to_cpu, cpu) = false;
			t->trc_ipi_to_cpu = -1; // Permit retry; leaving this >= 0 would block later IPIs.
			if (atomic_dec_and_test(&trc_n_readers_need_end)) {
				WARN_ON_ONCE(1);
				wake_up(&trc_wait);
			}
		}
	}
}

/* Initialize for a new RCU-tasks-trace grace period. */
static void rcu_tasks_trace_pregp_step(void)
{
	int cpu;

	// Allow for fast-acting IPIs.
	atomic_set(&trc_n_readers_need_end, 1);

	// There shouldn't be any old IPIs, but...
	for_each_possible_cpu(cpu)
		WARN_ON_ONCE(per_cpu(trc_ipi_to_cpu, cpu));

	// Disable CPU hotplug across the tasklist scan.
	// This also waits for all readers in CPU-hotplug code paths.
	cpus_read_lock();
}

/* Do first-round processing for the specified task. */
static void rcu_tasks_trace_pertask(struct task_struct *t,
				    struct list_head *hop)
{
	// During early boot when there is only the one boot CPU, there
	// is no idle task for the other CPUs.  Just return.
	if (unlikely(t == NULL))
		return;

	WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
	WRITE_ONCE(t->trc_reader_checked, false);
	t->trc_ipi_to_cpu = -1;
	trc_wait_for_one_reader(t, hop);
}

/*
 * Do intermediate processing between task and holdout scans and
 * pick up the idle tasks.
 */
static void rcu_tasks_trace_postscan(struct list_head *hop)
{
	int cpu;

	for_each_possible_cpu(cpu)
		rcu_tasks_trace_pertask(idle_task(cpu), hop);

	// Re-enable CPU hotplug now that the tasklist scan has completed.
	cpus_read_unlock();

	// Wait for late-stage exiting tasks to finish exiting.
	// These might have passed the call to exit_tasks_rcu_finish().
	synchronize_rcu();
	// Any tasks that exit after this point will set ->trc_reader_checked.
}

/* Show the state of a task stalling the current RCU tasks trace GP. */
static void show_stalled_task_trace(struct task_struct *t, bool *firstreport)
{
	int cpu;

	if (*firstreport) {
		pr_err("INFO: rcu_tasks_trace detected stalls on tasks:\n");
		*firstreport = false;
	}
	// FIXME: This should attempt to use try_invoke_on_nonrunning_task().
	cpu = task_cpu(t);
	pr_alert("P%d: %c%c%c nesting: %d%c cpu: %d\n",
		 t->pid,
		 ".I"[READ_ONCE(t->trc_ipi_to_cpu) > 0],
		 ".i"[is_idle_task(t)],
		 ".N"[cpu > 0 && tick_nohz_full_cpu(cpu)],
		 READ_ONCE(t->trc_reader_nesting),
		 " N"[!!READ_ONCE(t->trc_reader_special.b.need_qs)],
		 cpu);
	sched_show_task(t);
}

/* List stalled IPIs for RCU tasks trace. */
static void show_stalled_ipi_trace(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		if (per_cpu(trc_ipi_to_cpu, cpu))
			pr_alert("\tIPI outstanding to CPU %d\n", cpu);
}

/* Do one scan of the holdout list. */
static void check_all_holdout_tasks_trace(struct list_head *hop,
					  bool needreport, bool *firstreport)
{
	struct task_struct *g, *t;

	// Disable CPU hotplug across the holdout list scan.
	cpus_read_lock();

	list_for_each_entry_safe(t, g, hop, trc_holdout_list) {
		// If safe and needed, try to check the current task.
		if (READ_ONCE(t->trc_ipi_to_cpu) == -1 &&
		    !READ_ONCE(t->trc_reader_checked))
			trc_wait_for_one_reader(t, hop);

		// If check succeeded, remove this task from the list.
		if (READ_ONCE(t->trc_reader_checked))
			trc_del_holdout(t);
		else if (needreport)
			show_stalled_task_trace(t, firstreport);
	}

	// Re-enable CPU hotplug now that the holdout list scan has completed.
	cpus_read_unlock();

	if (needreport) {
		if (*firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Late IPI?)\n");
		show_stalled_ipi_trace();
	}
}

/* Wait for grace period to complete and provide ordering. */
static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
{
	bool firstreport;
	struct task_struct *g, *t;
	LIST_HEAD(holdouts);
	long ret;

	// Remove the safety count.
	smp_mb__before_atomic(); // Order vs. earlier atomics
	atomic_dec(&trc_n_readers_need_end);
	smp_mb__after_atomic(); // Order vs. later atomics

	// Wait for readers.
	set_tasks_gp_state(rtp, RTGS_WAIT_READERS);
	for (;;) {
		ret = wait_event_idle_exclusive_timeout(
				trc_wait,
				atomic_read(&trc_n_readers_need_end) == 0,
				READ_ONCE(rcu_task_stall_timeout));
		if (ret)
			break; // Count reached zero.
		// Stall warning time, so make a list of the offenders.
		rcu_read_lock();
		for_each_process_thread(g, t)
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				trc_add_holdout(t, &holdouts);
		rcu_read_unlock();
		firstreport = true;
		list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
			if (READ_ONCE(t->trc_reader_special.b.need_qs))
				show_stalled_task_trace(t, &firstreport);
			trc_del_holdout(t); // Release task_struct reference.
		}
		if (firstreport)
			pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
		show_stalled_ipi_trace();
		pr_err("\t%d holdouts\n", atomic_read(&trc_n_readers_need_end));
	}
	smp_mb(); // Caller's code must be ordered after wakeup.
		  // Pairs with pretty much every ordering primitive.
}

/* Report any needed quiescent state for this exiting task. */
static void exit_tasks_rcu_finish_trace(struct task_struct *t)
{
	WRITE_ONCE(t->trc_reader_checked, true);
	WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
	WRITE_ONCE(t->trc_reader_nesting, 0);
	if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
		rcu_read_unlock_trace_special(t, 0);
}

/**
 * call_rcu_tasks_trace() - Queue a callback for invocation after a trace task-based grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed.  call_rcu_tasks_trace()
 * assumes that the read-side critical sections are delimited by calls
 * to rcu_read_lock_trace() and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_rcu_tasks_generic(rhp, func, &rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(call_rcu_tasks_trace);

/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * rcu-tasks read-side critical sections have completed.  These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks.  The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_tasks_trace(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_trace_lock_map), "Illegal synchronize_rcu_tasks_trace() in RCU Tasks Trace read-side critical section");
	synchronize_rcu_tasks_generic(&rcu_tasks_trace);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks_trace);

/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks_trace(void)
{
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks_trace();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);

static int __init rcu_spawn_tasks_trace_kthread(void)
{
	if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
		rcu_tasks_trace.gp_sleep = HZ / 10;
		rcu_tasks_trace.init_fract = HZ / 10;
	} else {
		rcu_tasks_trace.gp_sleep = HZ / 200;
		if (rcu_tasks_trace.gp_sleep <= 0)
			rcu_tasks_trace.gp_sleep = 1;
		rcu_tasks_trace.init_fract = HZ / 200;
		if (rcu_tasks_trace.init_fract <= 0)
			rcu_tasks_trace.init_fract = 1;
	}
	rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
	rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
	rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
	rcu_tasks_trace.holdouts_func = check_all_holdout_tasks_trace;
	rcu_tasks_trace.postgp_func = rcu_tasks_trace_postgp;
	rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
	return 0;
}

#if !defined(CONFIG_TINY_RCU)
void show_rcu_tasks_trace_gp_kthread(void)
{
	char buf[64];

	sprintf(buf, "N%d h:%lu/%lu/%lu", atomic_read(&trc_n_readers_need_end),
		data_race(n_heavy_reader_ofl_updates),
		data_race(n_heavy_reader_updates),
		data_race(n_heavy_reader_attempts));
	show_rcu_tasks_generic_gp_kthread(&rcu_tasks_trace, buf);
}
EXPORT_SYMBOL_GPL(show_rcu_tasks_trace_gp_kthread);
#endif // !defined(CONFIG_TINY_RCU)

#else /* #ifdef CONFIG_TASKS_TRACE_RCU */
static void exit_tasks_rcu_finish_trace(struct task_struct *t) { }
#endif /* #else #ifdef CONFIG_TASKS_TRACE_RCU */

#ifndef CONFIG_TINY_RCU
void show_rcu_tasks_gp_kthreads(void)
{
	show_rcu_tasks_classic_gp_kthread();
	show_rcu_tasks_rude_gp_kthread();
	show_rcu_tasks_trace_gp_kthread();
}
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PROVE_RCU
struct rcu_tasks_test_desc {
	struct rcu_head rh;
	const char *name;
	bool notrun;
};

static struct rcu_tasks_test_desc tests[] = {
	{
		.name = "call_rcu_tasks()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RCU),
	},
	{
		.name = "call_rcu_tasks_rude()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_RUDE_RCU),
	},
	{
		.name = "call_rcu_tasks_trace()",
		/* If not defined, the test is skipped. */
		.notrun = !IS_ENABLED(CONFIG_TASKS_TRACE_RCU)
	}
};

static void test_rcu_tasks_callback(struct rcu_head *rhp)
{
	struct rcu_tasks_test_desc *rttd =
		container_of(rhp, struct rcu_tasks_test_desc, rh);

	pr_info("Callback from %s invoked.\n", rttd->name);

	rttd->notrun = true;
}

void rcu_tasks_initiate_self_tests(void)
{
	pr_info("Running RCU-tasks wait API self tests\n");
#ifdef CONFIG_TASKS_RCU
	synchronize_rcu_tasks();
	call_rcu_tasks(&tests[0].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	synchronize_rcu_tasks_rude();
	call_rcu_tasks_rude(&tests[1].rh, test_rcu_tasks_callback);
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	synchronize_rcu_tasks_trace();
	call_rcu_tasks_trace(&tests[2].rh, test_rcu_tasks_callback);
#endif
}

static int rcu_tasks_verify_self_tests(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		if (!tests[i].notrun) {		// still hanging.
			pr_err("%s has failed.\n", tests[i].name);
			ret = -1;
		}
	}

	if (ret)
		WARN_ON(1);

	return ret;
}
late_initcall(rcu_tasks_verify_self_tests);
#endif /* #ifdef CONFIG_PROVE_RCU */

void __init rcu_init_tasks_generic(void)
{
#ifdef CONFIG_TASKS_RCU
	rcu_spawn_tasks_kthread();
#endif

#ifdef CONFIG_TASKS_RUDE_RCU
	rcu_spawn_tasks_rude_kthread();
#endif

#ifdef CONFIG_TASKS_TRACE_RCU
	rcu_spawn_tasks_trace_kthread();
#endif
}
1408 static inline void rcu_tasks_bootup_oddness(void) {}
1409 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */