// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	    Josh Triplett <josh@joshtriplett.org>
 *
 * See also: Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK	 ((1 << RCUTORTURE_RDR_SHIFT) - 1)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
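
/*
 * Worked example (a sketch added for clarity, not from the original
 * source): a reader that holds rcu_read_lock_bh() while inside an SRCU
 * read-side critical section whose srcu_read_lock() returned index 1
 * would carry the reader state
 *
 *	(1 << RCUTORTURE_RDR_SHIFT) | RCUTORTURE_RDR_RBH == 0x108
 *
 * The low RCUTORTURE_RDR_MASK bits record which protections are held;
 * the bits above RCUTORTURE_RDR_SHIFT preserve the SRCU index that
 * must later be handed back to ->readunlock().
 */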
torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, fwd_progress, 1, "Test grace-period forward progress");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60,
	      "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1,
	      "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13,
	      "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16,
	      "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false,
	      "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0,
	      "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");
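
/*
 * Usage sketch (added for clarity, not from the original source): the
 * parameters above are normally supplied on the modprobe command line,
 * or on the kernel boot line when rcutorture is built in, for example:
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * See Documentation/RCU/torture.rst for the full parameter list.
 */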
static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct *fwd_prog_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10
// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;

// Update-side data structure used to check RCU readers.
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
	struct rcu_torture_reader_check *rtort_chkp;
};
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_mbchk_fail;
static atomic_t n_rcu_torture_mbchk_tries;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static atomic_long_t n_rcu_torture_timers;
static long n_barrier_attempts;
static long n_barrier_successes; /* did rcu_barrier test succeed? */
static unsigned long n_read_exits;
static struct list_head rcu_torture_removed;
static unsigned long shutdown_jiffies;
static unsigned long start_gp_seq;
static atomic_long_t n_nocb_offload;
static atomic_long_t n_nocb_deoffload;
static int rcu_torture_writer_state;
#define RTWS_FIXED_DELAY	0
#define RTWS_DELAY		1
#define RTWS_REPLACE		2
#define RTWS_DEF_FREE		3
#define RTWS_EXP_SYNC		4
#define RTWS_COND_GET		5
#define RTWS_COND_SYNC		6
#define RTWS_POLL_GET		7
#define RTWS_POLL_WAIT		8
#define RTWS_SYNC		9
#define RTWS_STUTTER		10
#define RTWS_STOPPING		11
static const char * const rcu_torture_writer_state_names[] = {
	"RTWS_FIXED_DELAY",
	"RTWS_DELAY",
	"RTWS_REPLACE",
	"RTWS_DEF_FREE",
	"RTWS_EXP_SYNC",
	"RTWS_COND_GET",
	"RTWS_COND_SYNC",
	"RTWS_POLL_GET",
	"RTWS_POLL_WAIT",
	"RTWS_SYNC",
	"RTWS_STUTTER",
	"RTWS_STOPPING",
};

/* Record reader segment types and duration for first failing read. */
struct rt_read_seg {
	int rt_readstate;
	unsigned long rt_delay_jiffies;
	unsigned long rt_delay_ms;
	unsigned long rt_delay_us;
	bool rt_preempted;
};
static int err_segs_recorded;
static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
static int rt_read_nsegs;
static const char *rcu_torture_writer_state_getname(void)
{
	unsigned int i = READ_ONCE(rcu_torture_writer_state);

	if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
		return "???";
	return rcu_torture_writer_state_names[i];
}

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();

	(void)do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

/*
 * Stop aggressive CPU-hog tests a bit before the end of the test in order
 * to avoid interfering with test shutdown.
 */
static bool shutdown_time_arrived(void)
{
	return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
}
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
static DEFINE_MUTEX(boost_mutex);	/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

static bool rcu_fwd_cb_nodelay;		/* Short rcu_torture_delay() delays. */
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	int ttype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp,
			   struct rt_read_seg *rtrsp);
	void (*readunlock)(int idx);
	int (*readlock_held)(void);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	unsigned long (*get_gp_state)(void);
	unsigned long (*start_gp_poll)(void);
	bool (*poll_gp_state)(unsigned long oldstate);
	void (*cond_sync)(unsigned long oldstate);
	call_rcu_func_t call;
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(void);
	void (*gp_kthread_dbg)(void);
	bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
	int (*stall_dur)(void);
	int irq_capable;
	int can_boost;
	int extendables;
	int slow_gps;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
/*
 * Definitions for rcu torture testing.
 */

static int torture_readlock_not_held(void)
{
	return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
}

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void
rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	unsigned long started;
	unsigned long completed;
	const unsigned long shortdelay_us = 200;
	unsigned long longdelay_ms = 300;
	unsigned long long ts;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!READ_ONCE(rcu_fwd_cb_nodelay) &&
	    !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
		started = cur_ops->get_gp_seq();
		ts = rcu_trace_clock_local();
		if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
			longdelay_ms = 5; /* Avoid triggering BH limits. */
		mdelay(longdelay_ms);
		rtrsp->rt_delay_ms = longdelay_ms;
		completed = cur_ops->get_gp_seq();
		do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
					  started, completed);
	}
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
		udelay(shortdelay_us);
		rtrsp->rt_delay_us = shortdelay_us;
	}
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 500))) {
		torture_preempt_schedule(); /* QS only if preemptible. */
		rtrsp->rt_preempted = true;
	}
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = READ_ONCE(rp->rtort_pipe_count);
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	if (rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
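
/*
 * Pipeline example (a sketch added for clarity, not from the original
 * source): a just-replaced element starts with rtort_pipe_count == 0,
 * and each completed grace period increments that count.  A reader that
 * observes a count greater than 1 has therefore seen an element survive
 * a full grace period after its replacement, which indicates a broken
 * RCU implementation.  Once the count reaches RCU_TORTURE_PIPE_LEN, the
 * element is eligible to return to the freelist.
 */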
/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * wait primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.get_gp_state		= get_state_synchronize_rcu,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.cond_sync		= cond_synchronize_rcu,
	.call			= call_rcu,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.stats			= NULL,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};
/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}
static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.cb_barrier	= NULL,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "busted"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;

static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}

static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}
static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}
/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.name		= "srcud"
};
/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};
/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};
/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		rcutorture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}

static int rcu_torture_read_lock_trivial(void) __acquires(RCU)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx) __releases(RCU)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "trivial"
};
/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};
/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.fqs		= NULL,
	.stats		= NULL,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};
static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
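
/*
 * Note (added for clarity, not from the original source): the ->gp_diff
 * hook exists because some flavors, vanilla RCU among them, encode
 * grace-period state in the low-order bits of their sequence numbers,
 * so a plain subtraction would overstate the number of elapsed grace
 * periods; rcu_seq_diff() compensates for that encoding.
 */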
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled. Only possible if rcutorture is built-in; otherwise the
	 * user should manually do this by setting the sched_rt_period_us and
	 * sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}
static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");
		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}
static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					boost_starttime = jiffies + test_boost_interval * HZ;
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(1);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}
// Used by writers to randomly choose from the available grace-period
// primitives.  The only purpose of the initialization is to size the array.
static int synctype[] = { RTWS_DEF_FREE, RTWS_EXP_SYNC, RTWS_COND_GET, RTWS_POLL_GET, RTWS_SYNC };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
	bool gp_poll1 = gp_poll, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_poll1 && !gp_sync1)
		gp_cond1 = gp_exp1 = gp_normal1 = gp_poll1 = gp_sync1 = true;
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	int expediting = 0;
	unsigned long gp_snap;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	bool stutter_waited;

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "rcu_torture_writer: No update-side primitives.\n")) {
		/*
		 * No update-side primitives, so don't try updating.
		 * The resulting test won't be testing much, hence the
		 * above WARN_ONCE().
		 */
		rcu_torture_writer_state = RTWS_STOPPING;
		torture_kthread_stopping("rcu_torture_writer");
		return 0;
	}

	do {
		rcu_torture_writer_state = RTWS_FIXED_DELAY;
		torture_hrtimeout_us(500, 1000, &rand);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		rcu_torture_writer_state = RTWS_DELAY;
		udelay(torture_random(&rand) & 0x3ff);
		rcu_torture_writer_state = RTWS_REPLACE;
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			WRITE_ONCE(old_rp->rtort_pipe_count,
				   old_rp->rtort_pipe_count + 1);
			if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
				idx = cur_ops->readlock();
				cookie = cur_ops->get_gp_state();
				WARN_ONCE(rcu_torture_writer_state != RTWS_DEF_FREE &&
					  cur_ops->poll_gp_state(cookie),
					  "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
					  __func__,
					  rcu_torture_writer_state_getname(),
					  rcu_torture_writer_state,
					  cookie, cur_ops->get_gp_state());
				cur_ops->readunlock(idx);
			}
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				rcu_torture_writer_state = RTWS_DEF_FREE;
				cur_ops->deferred_free(old_rp);
				break;
			case RTWS_EXP_SYNC:
				rcu_torture_writer_state = RTWS_EXP_SYNC;
				cur_ops->exp_sync();
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_COND_GET:
				rcu_torture_writer_state = RTWS_COND_GET;
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				rcu_torture_writer_state = RTWS_COND_SYNC;
				cur_ops->cond_sync(gp_snap);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_POLL_GET:
				rcu_torture_writer_state = RTWS_POLL_GET;
				gp_snap = cur_ops->start_gp_poll();
				rcu_torture_writer_state = RTWS_POLL_WAIT;
				while (!cur_ops->poll_gp_state(gp_snap))
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				rcu_torture_pipe_update(old_rp);
				break;
			case RTWS_SYNC:
				rcu_torture_writer_state = RTWS_SYNC;
				cur_ops->sync();
				rcu_torture_pipe_update(old_rp);
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		WRITE_ONCE(rcu_torture_current_version,
			   rcu_torture_current_version + 1);
		/* Cycle through nesting levels of rcu_expedite_gp() calls. */
		if (can_expedite &&
		    !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
			WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
			if (expediting >= 0)
				rcu_expedite_gp();
			else
				rcu_unexpedite_gp();
			if (++expediting > 3)
				expediting = -expediting;
		} else if (!can_expedite) { /* Disabled during boot, recheck. */
			can_expedite = !rcu_gp_is_expedited() &&
				       !rcu_gp_is_normal();
		}
		rcu_torture_writer_state = RTWS_STUTTER;
		boot_ended = rcu_inkernel_boot_has_ended();
		stutter_waited = stutter_wait("rcu_torture_writer");
		if (stutter_waited &&
		    !READ_ONCE(rcu_fwd_cb_nodelay) &&
		    !cur_ops->slow_gps &&
		    !torture_must_stop() &&
		    boot_ended)
			for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
				if (list_empty(&rcu_tortures[i].rtort_free) &&
				    rcu_access_pointer(rcu_torture_current) !=
				    &rcu_tortures[i]) {
					rcu_ftrace_dump(DUMP_ALL);
					WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
				}
		if (stutter_waited)
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	rcu_torture_current = NULL;  // Let stats task know that we are done.
	/* Reset expediting back to unexpedited. */
	if (expediting > 0)
		expediting = -expediting;
	while (can_expedite && expediting++ < 0)
		rcu_unexpedite_gp();
	WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " Dynamic grace-period expediting was disabled.\n",
			 torture_type);
	rcu_torture_writer_state = RTWS_STOPPING;
	torture_kthread_stopping("rcu_torture_writer");
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	unsigned long gp_snap;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, MAX_NICE);

	do {
		torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else {
			switch (synctype[torture_random(&rand) % nsynctypes]) {
			case RTWS_DEF_FREE:
				break;
			case RTWS_EXP_SYNC:
				cur_ops->exp_sync();
				break;
			case RTWS_COND_GET:
				gp_snap = cur_ops->get_gp_state();
				torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
				cur_ops->cond_sync(gp_snap);
				break;
			case RTWS_POLL_GET:
				gp_snap = cur_ops->start_gp_poll();
				while (!cur_ops->poll_gp_state(gp_snap)) {
					torture_hrtimeout_jiffies(torture_random(&rand) % 16,
								  &rand);
				}
				break;
			case RTWS_SYNC:
				cur_ops->sync();
				break;
			default:
				WARN_ON_ONCE(1);
				break;
			}
		}
		stutter_wait("rcu_torture_fakewriter");
	} while (!torture_must_stop());

	torture_kthread_stopping("rcu_torture_fakewriter");
	return 0;
}
static void rcu_torture_timer_cb(struct rcu_head *rhp)
{
	kfree(rhp);
}
// Set up and carry out testing of RCU's global memory ordering
static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
					struct torture_random_state *trsp)
{
	unsigned long loops;
	int noc = torture_num_online_cpus();
	int rdrchked;
	int rdrchker;
	struct rcu_torture_reader_check *rtrcp; // Me.
	struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
	struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
	struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.

	if (myid < 0)
		return; // Don't try this from timer handlers.

	// Increment my counter.
	rtrcp = &rcu_torture_reader_mbchk[myid];
	WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);

	// Attempt to assign someone else some checking work.
	rdrchked = torture_random(trsp) % nrealreaders;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	rdrchker = torture_random(trsp) % nrealreaders;
	rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
	if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
	    smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
	    !READ_ONCE(rtp->rtort_chkp) &&
	    !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
		rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
		WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
		rtrcp->rtc_chkrdr = rdrchked;
		WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
		if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
		    cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
			(void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
	}

	// If assigned some completed work, do it!
	rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
	if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
		return; // No work or work not yet ready.
	rdrchked = rtrcp_assigner->rtc_chkrdr;
	if (WARN_ON_ONCE(rdrchked < 0))
		return;
	rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
	loops = READ_ONCE(rtrcp_chked->rtc_myloops);
	atomic_inc(&n_rcu_torture_mbchk_tries);
	if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
		atomic_inc(&n_rcu_torture_mbchk_fail);
	rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
	rtrcp_assigner->rtc_ready = 0;
	smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
	smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
}
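
/*
 * Ordering rationale (added for clarity, not from the original source):
 * the assigner snapshots the checked reader's rtc_myloops before the
 * grace period begins, and the checker re-reads that counter only after
 * rtc_ready has been set at grace-period end.  Because each reader
 * increments its counter on every read-side critical section, observing
 * the post-grace-period value to be smaller than the pre-grace-period
 * snapshot (the ULONG_CMP_LT() test above) would mean that updates
 * failed to propagate across the grace period, violating RCU's global
 * memory-ordering guarantees.
 */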
/*
 * Do one extension of an RCU read-side critical section using the
 * current reader state in readstate (set to zero for initial entry
 * to extended critical section), set the new state as specified by
 * newstate (set to zero for final exit from extended critical section),
 * and random-number-generator state in trsp.  If this is neither the
 * beginning or end of the critical section and if there was actually a
 * change, do a ->read_delay().
 */
static void rcutorture_one_extend(int *readstate, int newstate,
				  struct torture_random_state *trsp,
				  struct rt_read_seg *rtrsp)
{
	unsigned long flags;
	int idxnew = -1;
	int idxold = *readstate;
	int statesnew = ~*readstate & newstate;
	int statesold = *readstate & ~newstate;

	WARN_ON_ONCE(idxold < 0);
	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
	rtrsp->rt_readstate = newstate;

	/* First, put new protection in place to avoid critical-section gap. */
	if (statesnew & RCUTORTURE_RDR_BH)
		local_bh_disable();
	if (statesnew & RCUTORTURE_RDR_RBH)
		rcu_read_lock_bh();
	if (statesnew & RCUTORTURE_RDR_IRQ)
		local_irq_disable();
	if (statesnew & RCUTORTURE_RDR_PREEMPT)
		preempt_disable();
	if (statesnew & RCUTORTURE_RDR_SCHED)
		rcu_read_lock_sched();
	if (statesnew & RCUTORTURE_RDR_RCU)
		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;

	/*
	 * Next, remove old protection, in decreasing order of strength
	 * to avoid unlock paths that aren't safe in the stronger
	 * context. Namely: BH can not be enabled with disabled interrupts.
	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
	 * context.
	 */
	if (statesold & RCUTORTURE_RDR_IRQ)
		local_irq_enable();
	if (statesold & RCUTORTURE_RDR_PREEMPT)
		preempt_enable();
	if (statesold & RCUTORTURE_RDR_SCHED)
		rcu_read_unlock_sched();
	if (statesold & RCUTORTURE_RDR_BH)
		local_bh_enable();
	if (statesold & RCUTORTURE_RDR_RBH)
		rcu_read_unlock_bh();
	if (statesold & RCUTORTURE_RDR_RCU) {
		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);

		if (lockit)
			raw_spin_lock_irqsave(&current->pi_lock, flags);
		cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
		if (lockit)
			raw_spin_unlock_irqrestore(&current->pi_lock, flags);
	}

	/* Delay if neither beginning nor end and there was a change. */
	if ((statesnew || statesold) && *readstate && newstate)
		cur_ops->read_delay(trsp, rtrsp);

	/* Update the reader state. */
	if (idxnew == -1)
		idxnew = idxold & ~RCUTORTURE_RDR_MASK;
	WARN_ON_ONCE(idxnew < 0);
	WARN_ON_ONCE((idxnew >> RCUTORTURE_RDR_SHIFT) > 1);
	*readstate = idxnew | newstate;
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) < 0);
	WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT) > 1);
}
/* Return the biggest extendables mask given current RCU and boot parameters. */
static int rcutorture_extend_mask_max(void)
{
	int mask;

	WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
	mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
	mask = mask | RCUTORTURE_RDR_RCU;
	return mask;
}
/* Return a random protection state mask, but with at least one bit set. */
static int
rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
{
	int mask = rcutorture_extend_mask_max();
	unsigned long randmask1 = torture_random(trsp) >> 8;
	unsigned long randmask2 = randmask1 >> 3;
	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
	if (!(randmask1 & 0x7))
		mask = mask & randmask2;
	else
		mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));

	/*
	 * Can't enable bh w/irq disabled.
	 */
	if (mask & RCUTORTURE_RDR_IRQ)
		mask |= oldmask & bhs;

	/*
	 * Ideally these sequences would be detected in debug builds
	 * (regardless of RT), but until then don't stop testing
	 * them on non-RT.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
		/* Can't modify BH in atomic context */
		if (oldmask & preempts_irq)
			mask &= ~bhs;
		if ((oldmask | mask) & preempts_irq)
			mask |= oldmask & bhs;
	}

	return mask ?: RCUTORTURE_RDR_RCU;
}
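
/*
 * Example (added for clarity, not from the original source): if the
 * newly drawn mask disables interrupts while the old state held a
 * bh-style protection, the bh bits are carried into the new mask above,
 * because local_bh_enable() must never run with interrupts disabled.
 * And when the random draw selects no protection at all, the
 * "mask ?: RCUTORTURE_RDR_RCU" fallback guarantees that at least the
 * flavor's own read-side primitive is held.
 */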
/*
 * Do a randomly selected number of extensions of an existing RCU read-side
 * critical section.
 */
static struct rt_read_seg *
rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
		       struct rt_read_seg *rtrsp)
{
	int i;
	int j;
	int mask = rcutorture_extend_mask_max();

	WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
	if (!((mask - 1) & mask))
		return rtrsp;  /* Current RCU reader not extendable. */
	/* Bias towards larger numbers of loops. */
	i = (torture_random(trsp) >> 3);
	i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
	for (j = 0; j < i; j++) {
		mask = rcutorture_extend_mask(*readstate, trsp);
		rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
	}
	return &rtrsp[j];
}
/*
 * Do one read-side critical section, returning false if there was
 * no data to read.  Can be invoked both from process context and
 * from a timer handler.
 */
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
{
	unsigned long cookie;
	int i;
	unsigned long started;
	unsigned long completed;
	int newstate;
	struct rcu_torture *p;
	int pipe_count;
	int readstate = 0;
	struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
	struct rt_read_seg *rtrsp = &rtseg[0];
	struct rt_read_seg *rtrsp1;
	unsigned long long ts;

	WARN_ON_ONCE(!rcu_is_watching());
	newstate = rcutorture_extend_mask(readstate, trsp);
	rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		cookie = cur_ops->get_gp_state();
	started = cur_ops->get_gp_seq();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  !cur_ops->readlock_held || cur_ops->readlock_held());
	if (p == NULL) {
		/* Wait for rcu_torture_writer to get underway */
		rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
		return false;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	rcu_torture_reader_do_mbchk(myid, p, trsp);
	rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
	preempt_disable();
	pipe_count = READ_ONCE(p->rtort_pipe_count);
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed = cur_ops->get_gp_seq();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
					  ts, started, completed);
		rcu_ftrace_dump(DUMP_ALL);
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = rcutorture_seq_diff(completed, started);
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
		WARN_ONCE(cur_ops->poll_gp_state(cookie),
			  "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
			  __func__,
			  rcu_torture_writer_state_getname(),
			  rcu_torture_writer_state,
			  cookie, cur_ops->get_gp_state());
	rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
	WARN_ON_ONCE(readstate & RCUTORTURE_RDR_MASK);
	// This next splat is expected behavior if leakpointer, especially
	// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
	WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);

	/* If error or close call, record the sequence of reader protections. */
	if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
		i = 0;
		for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
			err_segs[i++] = *rtrsp1;
		rt_read_nsegs = i;
	}

	return true;
}
static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(struct timer_list *unused)
{
	atomic_long_inc(&n_rcu_torture_timers);
	(void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);

	/* Test call_rcu() invocation from interrupt handler. */
	if (cur_ops->call) {
		struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);

		if (rhp)
			cur_ops->call(rhp, rcu_torture_timer_cb);
	}
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	unsigned long lastsleep = jiffies;
	long myid = (long)arg;
	int mynumonline = myid;
	DEFINE_TORTURE_RANDOM(rand);
	struct timer_list t;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, MAX_NICE);
	if (irqreader && cur_ops->irq_capable)
		timer_setup_on_stack(&t, rcu_torture_timer, 0);
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
			schedule_timeout_interruptible(HZ);
		if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
			torture_hrtimeout_us(500, 1000, &rand);
			lastsleep = jiffies + 10;
		}
		while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
			schedule_timeout_interruptible(HZ / 5);
		stutter_wait("rcu_torture_reader");
	} while (!torture_must_stop());
	if (irqreader && cur_ops->irq_capable) {
		del_timer_sync(&t);
		destroy_timer_on_stack(&t);
	}
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	torture_kthread_stopping("rcu_torture_reader");
	return 0;
}
/*
 * Randomly Toggle CPUs' callback-offload state.  This uses hrtimers to
 * increase race probabilities and fuzzes the interval between toggling.
 */
static int rcu_nocb_toggle(void *arg)
{
	int cpu;
	int maxcpu = -1;
	int oldnice = task_nice(current);
	long r;
	DEFINE_TORTURE_RANDOM(rand);
	ktime_t toggle_delay;
	unsigned long toggle_fuzz;
	ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);

	VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
	while (!rcu_inkernel_boot_has_ended())
		schedule_timeout_interruptible(HZ / 10);
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (toggle_interval > ULONG_MAX)
		toggle_fuzz = ULONG_MAX >> 3;
	else
		toggle_fuzz = toggle_interval >> 3;
	if (toggle_fuzz <= 0)
		toggle_fuzz = NSEC_PER_USEC;
	do {
		r = torture_random(&rand);
		cpu = (r >> 4) % (maxcpu + 1);
		if (r & 0x1) {
			rcu_nocb_cpu_offload(cpu);
			atomic_long_inc(&n_nocb_offload);
		} else {
			rcu_nocb_cpu_deoffload(cpu);
			atomic_long_inc(&n_nocb_deoffload);
		}
		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
		if (stutter_wait("rcu_nocb_toggle"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_nocb_toggle");
	return 0;
}
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	struct rcu_torture *rtcp;
	static unsigned long rtcv_snap = ULONG_MAX;
	static bool splatted;
	struct task_struct *wtp;

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
			batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	rtcp = rcu_access_pointer(rcu_torture_current);
	pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		rtcp,
		rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
		rcu_torture_current_version,
		list_empty(&rcu_torture_freelist),
		atomic_read(&n_rcu_torture_alloc),
		atomic_read(&n_rcu_torture_alloc_fail),
		atomic_read(&n_rcu_torture_free));
	pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld rtbre: %ld ",
		atomic_read(&n_rcu_torture_mberror),
		atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
		n_rcu_torture_barrier_error,
		n_rcu_torture_boost_ktrerror,
		n_rcu_torture_boost_rterror);
	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
		n_rcu_torture_boost_failure,
		n_rcu_torture_boosts,
		atomic_long_read(&n_rcu_torture_timers));
	torture_onoff_stats();
	pr_cont("barrier: %ld/%ld:%ld ",
		data_race(n_barrier_successes),
		data_race(n_barrier_attempts),
		data_race(n_rcu_torture_barrier_error));
	pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
	pr_cont("nocb-toggles: %ld:%ld\n",
		atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) ||
	    atomic_read(&n_rcu_torture_mbchk_fail) ||
	    n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
	    n_rcu_torture_boost_rterror || n_rcu_torture_boost_failure ||
	    i > 1) {
		pr_cont("%s", "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
		WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
		WARN_ON_ONCE(n_rcu_torture_barrier_error);  // rcu_barrier()
		WARN_ON_ONCE(n_rcu_torture_boost_ktrerror);  // no boost kthread
		WARN_ON_ONCE(n_rcu_torture_boost_rterror);  // can't set RT prio
		WARN_ON_ONCE(n_rcu_torture_boost_failure);  // boost failed (TIMER_SOFTIRQ RT prio?)
		WARN_ON_ONCE(i > 1);  // Too-short grace period
	}
	pr_cont("Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", pipesummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		pr_cont(" %ld", batchsummary[i]);
	pr_cont("\n");

	pr_alert("%s%s ", torture_type, TORTURE_FLAG);
	pr_cont("Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
	}
	pr_cont("\n");

	if (cur_ops->stats)
		cur_ops->stats();
	if (rtcv_snap == rcu_torture_current_version &&
	    rcu_access_pointer(rcu_torture_current) &&
	    !rcu_stall_is_suppressed()) {
		int __maybe_unused flags = 0;
		unsigned long __maybe_unused gp_seq = 0;

		rcutorture_get_gp_data(cur_ops->ttype,
				       &flags, &gp_seq);
		srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp,
					&flags, &gp_seq);
		wtp = READ_ONCE(writer_task);
		pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
			 rcu_torture_writer_state_getname(),
			 rcu_torture_writer_state, gp_seq, flags,
			 wtp == NULL ? ~0U : wtp->__state,
			 wtp == NULL ? -1 : (int)task_cpu(wtp));
		if (!splatted && wtp) {
			sched_show_task(wtp);
			splatted = true;
		}
		if (cur_ops->gp_kthread_dbg)
			cur_ops->gp_kthread_dbg();
		rcu_ftrace_dump(DUMP_ALL);
	}
	rtcv_snap = rcu_torture_current_version;
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_stats");
	return 0;
}
/* Test mem_dump_obj() and friends. */
static void rcu_torture_mem_dump_obj(void)
{
	struct rcu_head *rhp;
	struct kmem_cache *kcp;
	static int z;

	kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
	rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
	pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
	pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
	mem_dump_obj(ZERO_SIZE_PTR);
	pr_alert("mem_dump_obj(NULL):");
	mem_dump_obj(NULL);
	pr_alert("mem_dump_obj(%px):", &rhp);
	mem_dump_obj(&rhp);
	pr_alert("mem_dump_obj(%px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(%px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	pr_alert("mem_dump_obj(%px):", &z);
	mem_dump_obj(&z);
	kmem_cache_free(kcp, rhp);
	kmem_cache_destroy(kcp);
	rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
	pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
	pr_alert("mem_dump_obj(kmalloc %px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	kfree(rhp);
	rhp = vmalloc(4096);
	pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
	pr_alert("mem_dump_obj(vmalloc %px):", rhp);
	mem_dump_obj(rhp);
	pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
	mem_dump_obj(&rhp->func);
	vfree(rhp);
}
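/* Dump out rcutorture module parameters in a form useful for debugging. */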
static void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
		 "stall_cpu_block=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d "
		 "read_exit_delay=%d read_exit_burst=%d "
		 "nocbs_nthreads=%d nocbs_toggle=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
		 stall_cpu_block,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff,
		 read_exit_delay, read_exit_burst,
		 nocbs_nthreads, nocbs_toggle);
}
static int rcutorture_booster_cleanup(unsigned int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return 0;
	mutex_lock(&boost_mutex);
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	rcu_torture_enable_rt_throttle();
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	torture_stop_kthread(rcu_torture_boost, t);
	return 0;
}
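/*
 * CPU-hotplug callback: create and bind a boost kthread for the incoming
 * CPU.  For built-in runs, also raise ksoftirqd's priority so that RCU's
 * own softirq processing is not starved by the test's RT-priority abuse.
 */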
static int rcutorture_booster_init(unsigned int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0; /* Already created, nothing more to do. */

	// Testing RCU priority boosting requires rcutorture do
	// some serious abuse.  Counter this by running ksoftirqd
	// at higher priority.
	if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
		struct sched_param sp;
		struct task_struct *t;

		t = per_cpu(ksoftirqd, cpu);
		WARN_ON_ONCE(!t);
		sp.sched_priority = 2;
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	rcu_torture_disable_rt_throttle();
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	int idx;
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop() && stall_gp_kthread > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
		rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
		for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
			if (kthread_should_stop())
				break;
			schedule_timeout_uninterruptible(HZ);
		}
	}
	if (!kthread_should_stop() && stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
		stop_at = ktime_get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		idx = cur_ops->readlock();
		if (stall_cpu_irqsoff)
			local_irq_disable();
		else if (!stall_cpu_block)
			preempt_disable();
		pr_alert("%s start on CPU %d.\n",
			 __func__, raw_smp_processor_id());
		while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(),
				    stop_at))
			if (stall_cpu_block) {
#ifdef CONFIG_PREEMPTION
				preempt_schedule();
#else
				schedule_timeout_uninterruptible(HZ);
#endif
			} else if (stall_no_softlockup) {
				touch_softlockup_watchdog();
			}
		if (stall_cpu_irqsoff)
			local_irq_enable();
		else if (!stall_cpu_block)
			preempt_enable();
		cur_ops->readunlock(idx);
	}
	pr_alert("%s end.\n", __func__);
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	if (stall_cpu <= 0 && stall_gp_kthread <= 0)
		return 0;
	return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
}
/* State structure for forward-progress self-propagating RCU callback. */
struct fwd_cb_state {
	struct rcu_head rh;
	int stop;
};

/*
 * Forward-progress self-propagating RCU callback function.  Because
 * callbacks run from softirq, this function is an implicit RCU read-side
 * critical section.
 */
static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
{
	struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);

	if (READ_ONCE(fcsp->stop)) {
		WRITE_ONCE(fcsp->stop, 2);
		return;
	}
	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
}

/* State for continuous-flood RCU callbacks. */
struct rcu_fwd_cb {
	struct rcu_head rh;
	struct rcu_fwd_cb *rfc_next;
	struct rcu_fwd *rfc_rfp;
	int rfc_gps;
};

#define MAX_FWD_CB_JIFFIES	(8 * HZ) /* Maximum CB test duration. */
#define MIN_FWD_CB_LAUNDERS	3	/* This many CB invocations to count. */
#define MIN_FWD_CBS_LAUNDERED	100	/* Number of counted CBs. */
#define FWD_CBS_HIST_DIV	10	/* Histogram buckets/second. */
#define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))

struct rcu_launder_hist {
	long n_launders;
	unsigned long launder_gp_seq;
};

struct rcu_fwd {
	spinlock_t rcu_fwd_lock;
	struct rcu_fwd_cb *rcu_fwd_cb_head;
	struct rcu_fwd_cb **rcu_fwd_cb_tail;
	long n_launders_cb;
	unsigned long rcu_fwd_startat;
	struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
	unsigned long rcu_launder_gp_seq_start;
};

static DEFINE_MUTEX(rcu_fwd_mutex);
static struct rcu_fwd *rcu_fwds;
static bool rcu_fwd_emergency_stop;
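/*
 * Dump the histogram of callback invocations ("launders") accumulated by
 * the most recent callback-flood test, one bucket per 1/FWD_CBS_HIST_DIV
 * second, along with the number of grace periods seen in each bucket.
 */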
static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
{
	unsigned long gps;
	unsigned long gps_old;
	int i;
	int j;

	for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
		if (rfp->n_launders_hist[i].n_launders > 0)
			break;
	pr_alert("%s: Callback-invocation histogram (duration %lu jiffies):",
		 __func__, jiffies - rfp->rcu_fwd_startat);
	gps_old = rfp->rcu_launder_gp_seq_start;
	for (j = 0; j <= i; j++) {
		gps = rfp->n_launders_hist[j].launder_gp_seq;
		pr_cont(" %ds/%d: %ld:%ld",
			j + 1, FWD_CBS_HIST_DIV,
			rfp->n_launders_hist[j].n_launders,
			rcutorture_seq_diff(gps, gps_old));
		gps_old = gps;
	}
	pr_cont("\n");
}
/* Callback function for continuous-flood RCU callbacks. */
static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
{
	unsigned long flags;
	int i;
	struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
	struct rcu_fwd_cb **rfcpp;
	struct rcu_fwd *rfp = rfcp->rfc_rfp;

	rfcp->rfc_next = NULL;
	rfcp->rfc_gps++;
	spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
	rfcpp = rfp->rcu_fwd_cb_tail;
	rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
	WRITE_ONCE(*rfcpp, rfcp);
	WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
	i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
	if (i >= ARRAY_SIZE(rfp->n_launders_hist))
		i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
	rfp->n_launders_hist[i].n_launders++;
	rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
	spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
}
// Give the scheduler a chance, even on nohz_full CPUs.
static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
{
	if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
		// Real call_rcu() floods hit userspace, so emulate that.
		if (need_resched() || (iter & 0xfff))
			schedule();
		return;
	}
	// No userspace emulation: CB invocation throttles call_rcu()
	cond_resched();
}
/*
 * Free all callbacks on the rcu_fwd_cb_head list, either because the
 * test is over or because we hit an OOM event.
 */
static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
{
	unsigned long flags;
	unsigned long freed = 0;
	struct rcu_fwd_cb *rfcp;

	for (;;) {
		spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
		rfcp = rfp->rcu_fwd_cb_head;
		if (!rfcp) {
			spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
			break;
		}
		rfp->rcu_fwd_cb_head = rfcp->rfc_next;
		if (!rfp->rcu_fwd_cb_head)
			rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
		spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
		kfree(rfcp);
		freed++;
		rcu_torture_fwd_prog_cond_resched(freed);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	return freed;
}
/* Carry out need_resched()/cond_resched() forward-progress testing. */
static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
				    int *tested, int *tested_tries)
{
	unsigned long cver;
	unsigned long dur;
	struct fwd_cb_state fcs;
	unsigned long gps;
	int idx;
	int sd;
	int sd4;
	bool selfpropcb = false;
	unsigned long stopat;
	static DEFINE_TORTURE_RANDOM(trs);

	if (!cur_ops->sync)
		return; // Cannot do need_resched() forward progress testing without ->sync.
	if (cur_ops->call && cur_ops->cb_barrier) {
		init_rcu_head_on_stack(&fcs.rh);
		selfpropcb = true;
	}

	/* Tight loop containing cond_resched(). */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 0);
		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
	}
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	sd = cur_ops->stall_dur() + 1;
	sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
	dur = sd4 + torture_random(&trs) % (sd - sd4);
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + dur;
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		idx = cur_ops->readlock();
		udelay(10);
		cur_ops->readunlock(idx);
		if (!fwd_progress_need_resched || need_resched())
			cond_resched();
	}
	(*tested_tries)++;
	if (!time_before(jiffies, stopat) &&
	    !shutdown_time_arrived() &&
	    !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		(*tested)++;
		cver = READ_ONCE(rcu_torture_current_version) - cver;
		gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
		WARN_ON(!cver && gps < 2);
		pr_alert("%s: Duration %ld cver %ld gps %ld\n", __func__, dur, cver, gps);
	}
	if (selfpropcb) {
		WRITE_ONCE(fcs.stop, 1);
		cur_ops->sync(); /* Wait for running CB to complete. */
		cur_ops->cb_barrier(); /* Wait for queued callbacks. */
	}

	if (selfpropcb) {
		WARN_ON(READ_ONCE(fcs.stop) != 2);
		destroy_rcu_head_on_stack(&fcs.rh);
	}
	schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
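/*
 * The callback-flood test below re-posts ("launders") each callback as it
 * is invoked.  Forward progress is deemed demonstrated once at least
 * MIN_FWD_CBS_LAUNDERED callbacks have each been re-invoked at least
 * MIN_FWD_CB_LAUNDERS times, showing that grace periods continue to
 * complete even while call_rcu() is being hammered.
 */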
/* Carry out call_rcu() forward-progress testing. */
static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
{
	unsigned long cver;
	unsigned long flags;
	unsigned long gps;
	int i;
	long n_launders;
	long n_launders_cb_snap;
	long n_launders_sa;
	long n_max_cbs;
	long n_max_gps;
	struct rcu_fwd_cb *rfcp;
	struct rcu_fwd_cb *rfcpn;
	unsigned long stopat;
	unsigned long stoppedat;

	if (READ_ONCE(rcu_fwd_emergency_stop))
		return; /* Get out of the way quickly, no GP wait! */
	if (!cur_ops->call)
		return; /* Can't do call_rcu() fwd prog without ->call. */

	/* Loop continuously posting RCU callbacks. */
	WRITE_ONCE(rcu_fwd_cb_nodelay, true);
	cur_ops->sync(); /* Later readers see above write. */
	WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
	stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
	n_launders = 0;
	rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
	n_launders_sa = 0;
	n_max_cbs = 0;
	n_max_gps = 0;
	for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
		rfp->n_launders_hist[i].n_launders = 0;
	cver = READ_ONCE(rcu_torture_current_version);
	gps = cur_ops->get_gp_seq();
	rfp->rcu_launder_gp_seq_start = gps;
	tick_dep_set_task(current, TICK_DEP_BIT_RCU);
	while (time_before(jiffies, stopat) &&
	       !shutdown_time_arrived() &&
	       !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
		rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
		rfcpn = NULL;
		if (rfcp)
			rfcpn = READ_ONCE(rfcp->rfc_next);
		if (rfcpn) {
			if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
			    ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
				break;
			rfp->rcu_fwd_cb_head = rfcpn;
			n_launders++;
			n_launders_sa++;
		} else {
			rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
			if (WARN_ON_ONCE(!rfcp)) {
				schedule_timeout_interruptible(1);
				continue;
			}
			n_max_cbs++;
			n_launders_sa = 0;
			rfcp->rfc_gps = 0;
			rfcp->rfc_rfp = rfp;
		}
		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
		rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
		if (tick_nohz_full_enabled()) {
			local_irq_save(flags);
			rcu_momentary_dyntick_idle();
			local_irq_restore(flags);
		}
	}
	stoppedat = jiffies;
	n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
	cver = READ_ONCE(rcu_torture_current_version) - cver;
	gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
	cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
	(void)rcu_torture_fwd_prog_cbfree(rfp);

	if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
	    !shutdown_time_arrived()) {
		WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED);
		pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld\n",
			 __func__,
			 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
			 n_launders + n_max_cbs - n_launders_cb_snap,
			 n_launders, n_launders_sa,
			 n_max_gps, n_max_cbs, cver, gps);
		rcu_torture_fwd_cb_hist(rfp);
	}
	schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
	tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
	WRITE_ONCE(rcu_fwd_cb_nodelay, false);
}
/*
 * OOM notifier, but this only prints diagnostic information for the
 * current forward-progress test.
 */
static int rcutorture_oom_notify(struct notifier_block *self,
				 unsigned long notused, void *nfreed)
{
	struct rcu_fwd *rfp;

	mutex_lock(&rcu_fwd_mutex);
	rfp = rcu_fwds;
	if (!rfp) {
		mutex_unlock(&rcu_fwd_mutex);
		return NOTIFY_OK;
	}
	WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
	     __func__);
	rcu_torture_fwd_cb_hist(rfp);
	rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp->rcu_fwd_startat)) / 2);
	WRITE_ONCE(rcu_fwd_emergency_stop, true);
	smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	rcu_barrier();
	pr_info("%s: Freed %lu RCU callbacks.\n",
		__func__, rcu_torture_fwd_prog_cbfree(rfp));
	smp_mb(); /* Frees before return to avoid redoing OOM. */
	(*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
	pr_info("%s returning after OOM processing.\n", __func__);
	mutex_unlock(&rcu_fwd_mutex);
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_oom_nb = {
	.notifier_call = rcutorture_oom_notify
};
/* Carry out grace-period forward-progress testing. */
static int rcu_torture_fwd_prog(void *args)
{
	int oldnice = task_nice(current);
	struct rcu_fwd *rfp = args;
	int tested = 0;
	int tested_tries = 0;

	VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
	rcu_bind_current_to_nocb();
	if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
		set_user_nice(current, MAX_NICE);
	do {
		schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
		WRITE_ONCE(rcu_fwd_emergency_stop, false);
		if (!IS_ENABLED(CONFIG_TINY_RCU) ||
		    rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
		if (rcu_inkernel_boot_has_ended())
			rcu_torture_fwd_prog_cr(rfp);

		/* Avoid slow periods, better to test when busy. */
		if (stutter_wait("rcu_torture_fwd_prog"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	/* Short runs might not contain a valid forward-progress attempt. */
	WARN_ON(!tested && tested_tries >= 5);
	pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
	torture_kthread_stopping("rcu_torture_fwd_prog");
	return 0;
}
/* If forward-progress checking is requested and feasible, spawn the thread. */
static int __init rcu_torture_fwd_prog_init(void)
{
	struct rcu_fwd *rfp;

	if (!fwd_progress)
		return 0; /* Not requested, so don't do it. */
	if ((!cur_ops->sync && !cur_ops->call) ||
	    !cur_ops->stall_dur || cur_ops->stall_dur() <= 0 || cur_ops == &rcu_busted_ops) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
		return 0;
	}
	if (stall_cpu > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
		if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
			return -EINVAL; /* In module, can fail back to user. */
		WARN_ON(1); /* Make sure rcutorture notices conflict. */
		return 0;
	}
	if (fwd_progress_holdoff <= 0)
		fwd_progress_holdoff = 1;
	if (fwd_progress_div <= 0)
		fwd_progress_div = 4;
	rfp = kzalloc(sizeof(*rfp), GFP_KERNEL);
	if (!rfp)
		return -ENOMEM;
	spin_lock_init(&rfp->rcu_fwd_lock);
	rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = rfp;
	mutex_unlock(&rcu_fwd_mutex);
	register_oom_notifier(&rcutorture_oom_nb);
	return torture_create_kthread(rcu_torture_fwd_prog, rfp, fwd_prog_task);
}
static void rcu_torture_fwd_prog_cleanup(void)
{
	struct rcu_fwd *rfp;

	torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_task);
	rfp = rcu_fwds;
	mutex_lock(&rcu_fwd_mutex);
	rcu_fwds = NULL;
	mutex_unlock(&rcu_fwd_mutex);
	unregister_oom_notifier(&rcutorture_oom_nb);
	kfree(rfp);
}
/* Callback function for RCU barrier testing. */
static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* IPI handler to get callback posted on desired CPU, if online. */
static void rcu_torture_barrier1cb(void *rcu_void)
{
	struct rcu_head *rhp = rcu_void;

	cur_ops->call(rhp, rcu_torture_barrier_cbf);
}
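/*
 * Barrier testing is coordinated via barrier_phase: the driver kthread
 * flips it (with release semantics) to release the per-CPU callback-posting
 * kthreads, each of which posts one callback and decrements
 * barrier_cbs_count.  Once the count hits zero, the driver invokes
 * ->cb_barrier() and checks that every posted callback has been invoked.
 */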
/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = false;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, MAX_NICE);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    smp_load_acquire(&barrier_phase)) != lastphase ||
			   torture_must_stop());
		lastphase = newphase;
		if (torture_must_stop())
			break;
		/*
		 * The above smp_load_acquire() ensures barrier_phase load
		 * is ordered before the following ->call().
		 */
		if (smp_call_function_single(myid, rcu_torture_barrier1cb,
					     &rcu, 1)) {
			// IPI failed, so use direct call from current CPU.
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		}
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else
			n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}
/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}
/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
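/*
 * Can RCU priority boosting be tested?  This requires that boosting have
 * been requested, that the flavor under test support boosting and polled
 * grace periods, and that the RCU grace-period kthreads run at a priority
 * (rcutree.kthread_prio) greater than 1 so that there is room to boost.
 */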
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;
	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
		return false;
	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;
	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;
		pr_alert("%s: WARN: RCU kthread priority too low to test boosting.  Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}
	return true;
}
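// State coordinating the read-then-exit parent, its children, and cleanup.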
static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	(void)rcu_torture_one_read(trsp, -1);
	return 0;
}
// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	int count = 0;
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		if (++count > read_exit_burst) {
			VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
			rcu_barrier(); // Wait for task_struct free, avoid OOM.
			for (i = 0; i < read_exit_delay; i++) {
				schedule_timeout_uninterruptible(HZ);
				if (READ_ONCE(read_exit_child_stop))
					break;
			}
			if (!READ_ONCE(read_exit_child_stop))
				VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
			count = 0;
		}
		if (READ_ONCE(read_exit_child_stop))
			break;
		// Spawn child.
		tsp = kthread_run(rcu_torture_read_exit_child,
				  &trs, "%s",
				  "rcu_torture_read_exit_child");
		if (IS_ERR(tsp)) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			errexit = true;
			tsp = NULL;
			break;
		}
		cond_resched();
		kthread_stop(tsp);
		n_read_exits++;
		stutter_wait("rcu_torture_read_exit");
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}
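// Spawn the read-exit parent kthread, but only if read_exit_burst > 0.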
static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}

static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}
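// cpuhp state returned by cpuhp_setup_state() for the boost kthreads.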
static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");
		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
}
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
/* Intentionally empty: the point is only to exercise duplicate posting. */
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu(rhp, rcu_torture_leak_cb);
		call_rcu(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

/* Occasionally wait for a grace period on behalf of CPU-hotplug testing. */
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}
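/*
 * Module initialization: select the RCU flavor under test from torture_ops[],
 * validate and apply the module parameters, then spawn the full complement
 * of torture kthreads.  Any failure unwinds through rcu_torture_cleanup().
 */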
static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
		&busted_srcud_ops, &tasks_ops, &tasks_rude_ops,
		&tasks_tracing_ops, &trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	rcutorture_get_gp_data(cur_ops->ttype, &flags, &gp_seq);
	srcutorture_get_gp_data(cur_ops->ttype, srcu_ctlp, &flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);
	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;
	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 0;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = 0;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);