// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *	tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

/* Set to RCU_SCHEDULER_RUNNING once the scheduler is up. */
int rcu_scheduler_active __read_mostly;

/* srcu_struct structures whose grace periods were requested before workqueues were available. */
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
	ssp->srcu_lock_nesting[0] = 0;
	ssp->srcu_lock_nesting[1] = 0;
	init_swait_queue_head(&ssp->srcu_wq);
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	ssp->srcu_gp_running = false;
	ssp->srcu_gp_waiting = false;
	ssp->srcu_idx = 0;
	ssp->srcu_idx_max = 0;
	INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
	INIT_LIST_HEAD(&ssp->srcu_work.entry);
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

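/*
 * Illustrative sketch, not part of this file: one plausible way a caller
 * might set up and tear down an SRCU domain.  The names my_srcu_domain,
 * my_init(), and my_exit() are hypothetical.
 *
 *	static struct srcu_struct my_srcu_domain;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu_domain);
 *	}
 *
 *	static void my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu_domain);
 *	}
 *
 * Statically allocated domains can instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() from <linux/srcu.h>, which avoid the need to call
 * init_srcu_struct() at run time.
 */
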
/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
	flush_work(&ssp->srcu_work);
	WARN_ON(ssp->srcu_gp_running);
	WARN_ON(ssp->srcu_gp_waiting);
	WARN_ON(ssp->srcu_cb_head);
	WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
	WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max);
	WARN_ON(ssp->srcu_idx & 0x1);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

	WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
	if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task())
		swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

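/*
 * Illustrative sketch, not part of this file: the read-side pattern that
 * pairs with __srcu_read_unlock() via the srcu_read_lock() and
 * srcu_read_unlock() wrappers.  The names my_srcu_domain, my_data, and
 * struct foo are hypothetical.
 *
 *	struct foo *p;
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu_domain);
 *	p = srcu_dereference(my_data, &my_srcu_domain);
 *	... use p; unlike vanilla RCU, sleeping is legal here ...
 *	srcu_read_unlock(&my_srcu_domain, idx);
 */
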
/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
	int idx;
	struct rcu_head *lh;
	struct rcu_head *rhp;
	struct srcu_struct *ssp;

	ssp = container_of(wp, struct srcu_struct, srcu_work);
	if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		return; /* Already running or nothing to do. */

	/* Remove recently arrived callbacks and wait for readers. */
	WRITE_ONCE(ssp->srcu_gp_running, true);
	local_irq_disable();
	lh = ssp->srcu_cb_head;
	ssp->srcu_cb_head = NULL;
	ssp->srcu_cb_tail = &ssp->srcu_cb_head;
	local_irq_enable();
	idx = (ssp->srcu_idx & 0x2) / 2;
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);
	WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
	swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
	WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */
	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/* Invoke the callbacks we removed above. */
	while (lh) {
		rhp = lh;
		lh = lh->next;
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Enable rescheduling, and if there are more callbacks,
	 * reschedule ourselves.  This can race with a call_srcu()
	 * at interrupt level, but the ->srcu_gp_running checks will
	 * straighten that out.
	 */
	WRITE_ONCE(ssp->srcu_gp_running, false);
	if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)))
		schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);

static void srcu_gp_start_if_needed(struct srcu_struct *ssp)
{
	unsigned long cookie;

	cookie = get_state_synchronize_srcu(ssp);
	if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie))
		return;
	WRITE_ONCE(ssp->srcu_idx_max, cookie);
	if (!READ_ONCE(ssp->srcu_gp_running)) {
		if (likely(srcu_init_done))
			schedule_work(&ssp->srcu_work);
		else if (list_empty(&ssp->srcu_work.entry))
			list_add(&ssp->srcu_work.entry, &srcu_boot_list);
	}
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	unsigned long flags;

	rhp->func = func;
	rhp->next = NULL;
	local_irq_save(flags);
	*ssp->srcu_cb_tail = rhp;
	ssp->srcu_cb_tail = &rhp->next;
	local_irq_restore(flags);
	srcu_gp_start_if_needed(ssp);
}
EXPORT_SYMBOL_GPL(call_srcu);

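/*
 * Illustrative sketch, not part of this file: deferring the free of an
 * unpublished structure until a grace period has elapsed.  The names
 * struct foo, foo_cb(), my_srcu_domain, and old_foo are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	... after removing old_foo from all reader-visible paths ...
 *	call_srcu(&my_srcu_domain, &old_foo->rh, foo_cb);
 */
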
/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	struct rcu_synchronize rs;

	srcu_lock_sync(&ssp->dep_map);

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			lock_is_held(&rcu_bh_lock_map) ||
			lock_is_held(&rcu_lock_map) ||
			lock_is_held(&rcu_sched_lock_map),
			"Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;

	might_sleep();
	init_rcu_head_on_stack(&rs.head);
	init_completion(&rs.completion);
	call_srcu(ssp, &rs.head, wakeme_after_rcu);
	wait_for_completion(&rs.completion);
	destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

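/*
 * Illustrative sketch, not part of this file: a blocking updater that
 * unpublishes an item, waits for pre-existing readers, and then frees it.
 * The names my_data, my_srcu_domain, old, and new are hypothetical, and
 * some update-side lock is assumed to serialize writers.
 *
 *	old = my_data;
 *	rcu_assign_pointer(my_data, new);
 *	synchronize_srcu(&my_srcu_domain);
 *	kfree(old);
 */
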
/*
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret;

	barrier();
	ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1;
	barrier();
	return ret;
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/*
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 *
 * The difference between this and get_state_synchronize_srcu() is that
 * this function ensures that the poll_state_synchronize_srcu() will
 * eventually return the value true.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	unsigned long ret = get_state_synchronize_srcu(ssp);

	srcu_gp_start_if_needed(ssp);
	return ret;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/*
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	unsigned long cur_s = READ_ONCE(ssp->srcu_idx);

	barrier();
	return ULONG_CMP_GE(cur_s, cookie) || ULONG_CMP_LT(cur_s, cookie - 3);
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);

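/*
 * Illustrative sketch, not part of this file: polled grace-period waiting
 * for callers that cannot block in synchronize_srcu().  The cookie could
 * instead come from get_state_synchronize_srcu() if the caller is willing
 * to rely on some other update to start the grace period.  The names
 * my_srcu_domain and free_old_state() are hypothetical.
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu_domain);
 *	... do other work ...
 *	if (poll_state_synchronize_srcu(&my_srcu_domain, cookie))
 *		free_old_state();
 *	else
 *		... recheck later, the grace period has not yet elapsed ...
 */
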
/* Lockdep diagnostics.  */
void __init rcu_scheduler_starting(void)
{
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list,
				       struct srcu_struct, srcu_work.entry);
		list_del_init(&ssp->srcu_work.entry);
		schedule_work(&ssp->srcu_work);
	}
}