// SPDX-License-Identifier: GPL-2.0+
 * Read-Copy Update mechanism for mutual exclusion
 * Copyright IBM Corporation, 2001
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *	http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 *	http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#define CREATE_TRACE_POINTS
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "rcupdate."
#ifndef CONFIG_TINY_RCU
extern int rcu_expedited; /* from sysctl */
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
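/*
 * Usage note (not part of the original file): because MODULE_PARAM_PREFIX is
 * defined as "rcupdate." above, these module_param() declarations surface as
 * boot parameters such as:
 *
 *	rcupdate.rcu_expedited=1 rcupdate.rcu_normal_after_boot=1
 *
 * This is an illustrative sketch of the resulting parameter names only, not
 * an exhaustive list of supported settings.
 */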
#ifdef CONFIG_DEBUG_LOCK_ALLOC
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 * Returns true if lockdep must be ignored, in which case *ret contains
 * the best guess described below.  Otherwise returns false, in which
 * case *ret tells the caller nothing and the caller should instead
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set *ret to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 * Note that if the CPU is in the idle loop from an RCU point of view (ie:
 * that we are in the section between rcu_idle_enter() and rcu_idle_exit())
 * then rcu_read_lock_held() sets *ret to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required --- we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, CPUs that have started
 * a grace period can notice our extended quiescent state.  Otherwise we
 * would delay any grace period for as long as we run in the idle task.
 * Similarly, we avoid claiming an RCU read lock held if the current
static bool rcu_read_lock_held_common(bool *ret)
	if (!debug_lockdep_rcu_enabled()) {
	if (!rcu_is_watching()) {
	if (!rcu_lockdep_current_cpu_online()) {
int rcu_read_lock_sched_held(void)
	if (rcu_read_lock_held_common(&ret))
	return lock_is_held(&rcu_sched_lock_map) || !preemptible();
EXPORT_SYMBOL(rcu_read_lock_sched_held);
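/*
 * Illustrative sketch (not part of the original file): one way a caller might
 * use rcu_read_lock_sched_held() in a lockdep-based debug check.  The
 * function my_update_sched_protected_stats() and the data it guards are
 * hypothetical.
 */
#if 0
static void my_update_sched_protected_stats(void)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
			 "need rcu_read_lock_sched() or disabled preemption");
	/* ... update data protected by RCU-sched here ... */
}
#endif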
#ifndef CONFIG_TINY_RCU
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the time period during boot from
 * when the first task is spawned until the rcu_set_runtime_mode()
 * core_initcall() is invoked, at which point everything is expedited.)
bool rcu_gp_is_normal(void)
	return READ_ONCE(rcu_normal) &&
	       rcu_scheduler_active != RCU_SCHEDULER_INIT;
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
bool rcu_gp_is_expedited(void)
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
 * rcu_expedite_gp - Expedite future RCU grace periods
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
void rcu_expedite_gp(void)
	atomic_inc(&rcu_expedited_nesting);
EXPORT_SYMBOL_GPL(rcu_expedite_gp);
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
void rcu_unexpedite_gp(void)
	atomic_dec(&rcu_expedited_nesting);
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
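/*
 * Illustrative sketch (not part of the original file): pairing
 * rcu_expedite_gp() with rcu_unexpedite_gp() around a latency-sensitive
 * phase so that synchronize_rcu() calls issued in between behave like
 * synchronize_rcu_expedited().  my_reconfigure() is hypothetical.
 */
#if 0
static void my_reconfigure(void)
{
	rcu_expedite_gp();
	/* ... updates whose grace periods must complete quickly ... */
	synchronize_rcu();	/* Expedited while the nesting count is nonzero. */
	rcu_unexpedite_gp();
}
#endif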
 * Inform RCU of the end of the in-kernel boot sequence.
void rcu_end_inkernel_boot(void)
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
#endif /* #ifndef CONFIG_TINY_RCU */
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
void rcu_test_sync_prims(void)
	if (!IS_ENABLED(CONFIG_PROVE_RCU))
	synchronize_rcu_expedited();
#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)
 * Switch to run-time mode once RCU has fully initialized.
static int __init rcu_set_runtime_mode(void)
	rcu_test_sync_prims();
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
	rcu_test_sync_prims();
core_initcall(rcu_set_runtime_mode);
#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);
static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);
static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);
int notrace debug_lockdep_rcu_enabled(void)
	return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
	       current->lockdep_recursion == 0;
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
int rcu_read_lock_held(void)
	if (rcu_read_lock_held_common(&ret))
	return lock_is_held(&rcu_lock_map);
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
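/*
 * Illustrative sketch (not part of the original file): a debug assertion
 * built on rcu_read_lock_held() in a helper that must run under
 * rcu_read_lock().  struct my_dev, struct my_cfg, and my_get_cfg() are
 * hypothetical.
 */
#if 0
struct my_cfg {
	int mtu;
};

struct my_dev {
	struct my_cfg __rcu *cfg;
};

static struct my_cfg *my_get_cfg(struct my_dev *dev)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "my_get_cfg() called outside of an RCU read-side critical section");
	return rcu_dereference(dev->cfg);
}
#endif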
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and not cases.  Note that if someone uses
 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
int rcu_read_lock_bh_held(void)
	if (rcu_read_lock_held_common(&ret))
	return in_softirq() || irqs_disabled();
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
int rcu_read_lock_any_held(void)
	if (rcu_read_lock_held_common(&ret))
	if (lock_is_held(&rcu_lock_map) ||
	    lock_is_held(&rcu_bh_lock_map) ||
	    lock_is_held(&rcu_sched_lock_map))
	return !preemptible();
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 * Awaken the corresponding task now that a grace period has elapsed.
void wakeme_after_rcu(struct rcu_head *head)
	struct rcu_synchronize *rcu;
	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
EXPORT_SYMBOL_GPL(wakeme_after_rcu);
void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
	/* Initialize and register callbacks for each crcu_array element. */
	for (i = 0; i < n; i++) {
		    (crcu_array[i] == call_rcu)) {
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
			(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		    (crcu_array[i] == call_rcu))
		for (j = 0; j < i; j++)
			if (crcu_array[j] == crcu_array[i])
			wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
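/*
 * Illustrative sketch (not part of the original file): the open-coded pattern
 * that the machinery above implements, namely posting a callback that
 * completes a completion and then blocking until the grace period ends.
 * my_wait_for_one_rcu_gp() is hypothetical.
 */
#if 0
static void my_wait_for_one_rcu_gp(void)
{
	struct rcu_synchronize rs;

	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_rcu(&rs.head, wakeme_after_rcu);	/* Fires after a full grace period. */
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
#endif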
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
	debug_object_init(head, &rcuhead_debug_descr);
EXPORT_SYMBOL_GPL(init_rcu_head);
void destroy_rcu_head(struct rcu_head *head)
	debug_object_free(head, &rcuhead_debug_descr);
EXPORT_SYMBOL_GPL(destroy_rcu_head);
static bool rcuhead_is_static_object(void *addr)
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
void init_rcu_head_on_stack(struct rcu_head *head)
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
void destroy_rcu_head_on_stack(struct rcu_head *head)
	debug_object_free(head, &rcuhead_debug_descr);
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
struct debug_obj_descr rcuhead_debug_descr = {
	.is_static_object = rcuhead_is_static_object,
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long c_old, unsigned long c)
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
	ret = sched_setaffinity(pid, in_mask);
	WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
#ifdef CONFIG_TASKS_RCU
 * Simple variant of RCU whose quiescent states are voluntary context
 * switch, cond_resched_rcu_qs(), user-space execution, and idle.
 * As such, grace periods can take one good long time.  There are no
 * read-side primitives similar to rcu_read_lock() and rcu_read_unlock()
 * because this implementation is intended to get the system into a safe
 * state for some of the manipulations involved in tracing and the like.
 * Finally, this implementation does not support high call_rcu_tasks()
 * rates from multiple CPUs.  If this is required, per-CPU callback lists
/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
/* Control stall timeouts.  Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
static int rcu_task_stall_timeout __read_mostly = RCU_TASK_STALL_TIMEOUT;
module_param(rcu_task_stall_timeout, int, 0644);
static struct task_struct *rcu_tasks_kthread_ptr;
 * call_rcu_tasks() - Queue an RCU callback for invocation after an RCU-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 * The callback function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_tasks() assumes
 * that the read-side critical sections end at a voluntary context
 * switch (not a preemption!), cond_resched_rcu_qs(), entry into idle,
 * or transition to usermode execution.  As such, there are no read-side
 * primitives analogous to rcu_read_lock() and rcu_read_unlock() because
 * this primitive is intended to determine that all tasks have passed
 * through a safe state, not so much for data-structure synchronization.
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if (needwake && READ_ONCE(rcu_tasks_kthread_ptr))
		wake_up(&rcu_tasks_cbs_wq);
EXPORT_SYMBOL_GPL(call_rcu_tasks);
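/*
 * Illustrative sketch (not part of the original file): deferring the freeing
 * of a trampoline-like object with call_rcu_tasks() so that it is released
 * only after every task has passed through a voluntary context switch, idle,
 * or usermode execution.  struct my_tramp and the my_*() helpers are
 * hypothetical.
 */
#if 0
struct my_tramp {
	struct rcu_head rh;
	/* ... executable stub and bookkeeping ... */
};

static void my_free_tramp(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct my_tramp, rh));
}

static void my_retire_tramp(struct my_tramp *tr)
{
	/* Callers must already be prevented from entering the trampoline. */
	call_rcu_tasks(&tr->rh, my_free_tramp);
}
#endif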
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed.  These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_tasks_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks.  The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks().  In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
void synchronize_rcu_tasks(void)
	/* Complain if the scheduler has not started.  */
	RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
			 "synchronize_rcu_tasks called too soon");
	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
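/*
 * Illustrative sketch (not part of the original file): the synchronous form
 * of the pattern above, as tracing-style code might use it.  Unhook the stub
 * so that no new tasks can enter it, wait for tasks already inside it to
 * reach a voluntary context switch, idle, or usermode, then free it.
 * my_unhook_stub() and my_free_stub() are hypothetical.
 */
#if 0
static void my_retire_stub_sync(void *stub)
{
	my_unhook_stub(stub);		/* No new entries into the stub. */
	synchronize_rcu_tasks();	/* Tasks already inside have now left it. */
	my_free_stub(stub);
}
#endif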
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
void rcu_barrier_tasks(void)
	/* There is only one callback queue, so this is easy.  ;-) */
	synchronize_rcu_tasks();
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
	rcu_request_urgent_qs_task(t);
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);
	/* Run on housekeeping CPUs by default.  Sysadm can move if desired. */
	housekeeping_affine(current, HK_FLAG_RCU);
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down.  ;-)
		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
		/* If there were none, wait a bit and start over. */
			wait_event_interruptible(rcu_tasks_cbs_wq,
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete.  Invoking synchronize_rcu()
		 * suffices because all these transitions occur with
		 * interrupts disabled.  Without this synchronize_rcu(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 * This synchronize_rcu() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period.  Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked.  Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_rcu() to finish the job.
		synchronize_srcu(&tasks_rcu_exit_srcu);
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts.  When the list is empty, we are done.
		lastreport = jiffies;
		/* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
			struct task_struct *t1;
			if (list_empty(&rcu_tasks_holdouts))
			/* Slowly back off waiting for holdouts */
			schedule_timeout_interruptible(HZ/fract);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
				lastreport = jiffies;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period.  However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_rcu()
		 * to force the needed ordering on all such CPUs.
		 * This synchronize_rcu() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 * In addition, this synchronize_rcu() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		/* Invoke the callbacks. */
		/* Paranoid sleep to keep this from entering a tight loop */
		schedule_timeout_uninterruptible(HZ/10);
/* Spawn rcu_tasks_kthread() at core_initcall() time. */
static int __init rcu_spawn_tasks_kthread(void)
	struct task_struct *t;
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	if (WARN_ONCE(IS_ERR(t), "%s: Could not start Tasks-RCU grace-period kthread, OOM is now expected behavior\n", __func__))
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
core_initcall(rcu_spawn_tasks_kthread);
/* Do the srcu_read_lock() for the above synchronize_srcu().  */
void exit_tasks_rcu_start(void)
	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
/* Do the srcu_read_unlock() for the above synchronize_srcu().  */
void exit_tasks_rcu_finish(void)
	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifndef CONFIG_TINY_RCU
 * Print any non-default Tasks RCU settings.
static void __init rcu_tasks_bootup_oddness(void)
#ifdef CONFIG_TASKS_RCU
	if (rcu_task_stall_timeout != RCU_TASK_STALL_TIMEOUT)
		pr_info("\tTasks-RCU CPU stall warnings timeout set to %d (rcu_task_stall_timeout).\n", rcu_task_stall_timeout);
		pr_info("\tTasks RCU enabled.\n");
#endif /* #ifdef CONFIG_TASKS_RCU */
#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_PROVE_RCU
 * Early boot self test parameters.
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);
static int rcu_self_test_counter;
static void test_callback(struct rcu_head *r)
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
DEFINE_STATIC_SRCU(early_srcu);
static void early_boot_test_call_rcu(void)
	static struct rcu_head head;
	static struct rcu_head shead;
	call_rcu(&head, test_callback);
	if (IS_ENABLED(CONFIG_SRCU))
		call_srcu(&early_srcu, &shead, test_callback);
void rcu_early_boot_tests(void)
	pr_info("Running RCU self tests\n");
		early_boot_test_call_rcu();
	rcu_test_sync_prims();
static int rcu_verify_early_boot_tests(void)
	int early_boot_test_counter = 0;
		early_boot_test_counter++;
		if (IS_ENABLED(CONFIG_SRCU)) {
			early_boot_test_counter++;
			srcu_barrier(&early_srcu);
	if (rcu_self_test_counter != early_boot_test_counter) {
late_initcall(rcu_verify_early_boot_tests);
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */
#ifndef CONFIG_TINY_RCU
 * Print any significant non-default boot-time settings.
void __init rcupdate_announce_bootup_oddness(void)
		pr_info("\tNo expedited grace period (rcu_normal).\n");
	else if (rcu_normal_after_boot)
		pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
	else if (rcu_expedited)
		pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
	if (rcu_cpu_stall_suppress)
		pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
	if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
		pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
	rcu_tasks_bootup_oddness();
#endif /* #ifndef CONFIG_TINY_RCU */