/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");

MODULE_ALIAS("rcutorture");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutorture."
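/*
 * Defining MODULE_PARAM_PREFIX explicitly keeps parameter names stable:
 * whether rcutorture is built in or loaded as a module, each parameter
 * below is known to the kernel as "rcutorture.<name>".
 */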
torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	      "Use normal (non-expedited) GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	      "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	      "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	      "Time to wait before starting stall (s).");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	      "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
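/*
 * Illustrative invocations (see Documentation/RCU/torture.txt for the
 * authoritative parameter list):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * or, with CONFIG_RCU_TORTURE_TEST=y, the equivalent on the boot line:
 *
 *	rcutorture.torture_type=srcu rcutorture.nreaders=4
 */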
char *torture_type = "rcu";
EXPORT_SYMBOL_GPL(torture_type);
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
bool verbose;
EXPORT_SYMBOL_GPL(verbose);
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};
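/*
 * Each rcu_torture element moves through a ten-stage "pipeline":
 * rtort_pipe_count is incremented once per grace period after the element
 * is removed from rcu_torture_current, and the element is recycled to the
 * freelist once the count reaches RCU_TORTURE_PIPE_LEN.  A reader that
 * observes a count greater than 1 has seen an element survive a full
 * grace period while still accessible, indicating a broken RCU
 * implementation.
 */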
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_count) = { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_batch) = { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;
static long n_barrier_attempts;
static long n_barrier_successes;
static struct list_head rcu_torture_removed;
static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");
#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();
	unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);

	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */
static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
/* Forward reference. */
static void rcu_torture_cleanup(void);
/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		pr_warn(/* but going down anyway, so... */
			"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}
/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}
/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}
static void
rcu_stutter_wait(const char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		torture_shutdown_absorb(title);
	}
}
/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(char *page);
	int irq_capable;
	int can_boost;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;
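/*
 * Each flavor of RCU under test supplies one of these vectors.  The
 * torture_type module parameter is matched against ->name at init time
 * to select the vector, after which all test kthreads operate purely
 * through cur_ops.  Optional members (->call, ->cb_barrier, ->fqs,
 * ->stats) may be NULL, in which case the corresponding test is skipped.
 */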
/*
 * Definitions for rcu torture testing.
 */
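/*
 * For reference, the classical RCU usage pattern that these tests hammer
 * on looks like this (illustrative sketch only):
 *
 *	Reader:				Updater:
 *	rcu_read_lock();		rcu_assign_pointer(gp, newp);
 *	p = rcu_dereference(gp);	synchronize_rcu();  (or call_rcu())
 *	...use *p...			kfree(oldp);
 *	rcu_read_unlock();
 *
 * rcu_torture_writer() plays the updater role below, and the reader
 * kthreads verify that no element is ever referenced after its grace
 * period has elapsed.
 */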
static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}
static void rcu_read_delay(struct torture_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}
static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}
static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else {
		cur_ops->deferred_free(rp);
	}
}
static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}
static struct rcu_torture_ops rcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};
/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};
/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static void srcu_torture_call(struct rcu_head *head,
			      void (*func)(struct rcu_head *head))
{
	call_srcu(&srcu_ctl, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(&srcu_ctl);
}

static void srcu_torture_stats(char *page)
{
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	page += sprintf(page, "%s%s per-CPU(idx=%d):",
			torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		page += sprintf(page, " %d(%lu,%lu)", cpu,
				per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
				per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	sprintf(page, "\n");
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};
/*
 * Definitions for sched torture testing.
 */
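/*
 * The "sched" flavor uses preemption-disabled regions as its read-side
 * critical sections, so its lock/unlock primitives reduce to
 * preempt_disable()/preempt_enable().
 */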
static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};
/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}
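/*
 * The boost test posts a callback and spins at RT priority waiting for
 * it to be invoked.  If a callback is still pending within half a second
 * of the end of its test_boost_duration interval, priority boosting is
 * deemed to have failed.
 */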
static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_TOROUT_STRING("rcu_torture_boost task stopping");
	torture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}
/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_fqs task stopping");
	torture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
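/*
 * When gp_normal and gp_exp are equal (e.g., both left false), each
 * replacement below randomly chooses between a normal (deferred-callback)
 * grace period and an expedited one, exercising both paths in one run.
 */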
static int
rcu_torture_writer(void *arg)
{
	bool exp;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(torture_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			if (gp_normal == gp_exp)
				exp = !!(torture_random(&rand) & 0x80);
			else
				exp = gp_exp;
			if (!exp) {
				cur_ops->deferred_free(old_rp);
			} else {
				cur_ops->exp_sync();
				list_add(&old_rp->rtort_free,
					 &rcu_torture_removed);
				list_for_each_entry_safe(rp, rp1,
							 &rcu_torture_removed,
							 rtort_free) {
					i = rp->rtort_pipe_count;
					if (i > RCU_TORTURE_PIPE_LEN)
						i = RCU_TORTURE_PIPE_LEN;
					atomic_inc(&rcu_torture_wcount[i]);
					if (++rp->rtort_pipe_count >=
					    RCU_TORTURE_PIPE_LEN) {
						rp->rtort_mbtest = 0;
						list_del(&rp->rtort_free);
						rcu_torture_free(rp);
					}
				}
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_writer task stopping");
	torture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (torture_random(&rand) & 0x80)
				cur_ops->sync();
			else
				cur_ops->exp_sync();
		} else if (gp_normal) {
			cur_ops->sync();
		} else {
			cur_ops->exp_sync();
		}
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task stopping");
	torture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	ftrace_dump(DUMP_ALL);
}
/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	int completed_end;
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed_end = cur_ops->completed();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  completed, completed_end);
		rcutorture_trace_dump();
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed_end - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}
/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int completed_end;
	int idx;
	DEFINE_TORTURE_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;
	unsigned long long ts;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		ts = rcu_trace_clock_local();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed_end = cur_ops->completed();
		if (pipe_count > 1) {
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, completed, completed_end);
			rcutorture_trace_dump();
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed_end - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_reader task stopping");
	torture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}
/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static void
rcu_torture_printk(char *page)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
			"rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
			rcu_torture_current,
			rcu_torture_current_version,
			list_empty(&rcu_torture_freelist),
			atomic_read(&n_rcu_torture_alloc),
			atomic_read(&n_rcu_torture_alloc_fail),
			atomic_read(&n_rcu_torture_free));
	page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
			atomic_read(&n_rcu_torture_mberror),
			n_rcu_torture_boost_ktrerror,
			n_rcu_torture_boost_rterror);
	page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
			n_rcu_torture_boost_failure,
			n_rcu_torture_boosts,
			n_rcu_torture_timers);
	page += sprintf(page,
			"onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
			n_online_successes, n_online_attempts,
			n_offline_successes, n_offline_attempts,
			min_online, max_online,
			min_offline, max_offline,
			sum_online, sum_offline, HZ);
	page += sprintf(page, "barrier: %ld/%ld:%ld",
			n_barrier_successes,
			n_barrier_attempts,
			n_rcu_torture_barrier_error);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		page += sprintf(page, "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	page += sprintf(page, "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", pipesummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", batchsummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		page += sprintf(page, " %d",
				atomic_read(&rcu_torture_wcount[i]));
	}
	page += sprintf(page, "\n");
	if (cur_ops->stats)
		cur_ops->stats(page);
}
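/*
 * A partial key to the one-line summary above:  "rtc" is the current
 * writer element, "ver" the number of writer substitutions, "tfle"
 * whether the freelist is empty, "rta"/"rtaf"/"rtf" the allocation,
 * allocation-failure, and free counts, and "rtmbe" the memory-barrier
 * (rtort_mbtest) error count.  A "!!!" prefix on the second line flags
 * a test failure.
 */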
/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int size = nr_cpu_ids * 200 + 8192;
	char *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("rcu-torture: Out of memory, need: %d", size);
		return;
	}
	rcu_torture_printk(buf);
	pr_alert("%s", buf);
	kfree(buf);
}
/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_stats task stopping");
	return 0;
}
/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		torture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_stutter task stopping");
	return 0;
}
static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}
static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};
static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
	boost_tasks[cpu] = NULL;
}
static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}
/*
 * Cause the rcutorture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs module parameter.
 */
static int
rcu_torture_shutdown(void *arg)
{
	unsigned long delta;
	unsigned long jiffies_snap;

	VERBOSE_TOROUT_STRING("rcu_torture_shutdown task started");
	jiffies_snap = ACCESS_ONCE(jiffies);
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !kthread_should_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "rcu_torture_shutdown task: %lu jiffies remaining\n",
				 torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = ACCESS_ONCE(jiffies);
	}
	if (kthread_should_stop()) {
		VERBOSE_TOROUT_STRING("rcu_torture_shutdown task stopping");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("rcu_torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	rcu_torture_cleanup();	/* Get the success/failure message. */
	kernel_power_off();	/* Shut down the system. */
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
rcu_torture_onoff(void *arg)
{
	int cpu;
	unsigned long delta;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;
	unsigned long starttime;

	VERBOSE_TOROUT_STRING("rcu_torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_onoff end holdoff");
	}
	while (!kthread_should_stop()) {
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "rcu_torture_onoff task: offlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_offline_attempts++;
			ret = cpu_down(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: offline %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: offlined %d\n",
						 torture_type, cpu);
				n_offline_successes++;
				delta = jiffies - starttime;
				sum_offline += delta;
				if (min_offline < 0) {
					min_offline = delta;
					max_offline = delta;
				}
				if (min_offline > delta)
					min_offline = delta;
				if (max_offline < delta)
					max_offline = delta;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "rcu_torture_onoff task: onlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_online_attempts++;
			ret = cpu_up(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: online %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: onlined %d\n",
						 torture_type, cpu);
				n_online_successes++;
				delta = jiffies - starttime;
				sum_online += delta;
				if (min_online < 0) {
					min_online = delta;
					max_online = delta;
				}
				if (min_online > delta)
					min_online = delta;
				if (max_online < delta)
					max_online = delta;
			}
		}
		schedule_timeout_interruptible(onoff_interval * HZ);
	}
	VERBOSE_TOROUT_STRING("rcu_torture_onoff task stopping");
	return 0;
}

static int
rcu_torture_onoff_init(void)
{
	int ret;

	if (onoff_interval <= 0)
		return 0;
	onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
	if (IS_ERR(onoff_task)) {
		ret = PTR_ERR(onoff_task);
		onoff_task = NULL;
		return ret;
	}
	torture_shuffle_task_register(onoff_task);
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static int
rcu_torture_onoff_init(void)
{
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
		preempt_disable();
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}
/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	int ret;

	if (stall_cpu <= 0)
		return 0;
	stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
	if (IS_ERR(stall_task)) {
		ret = PTR_ERR(stall_task);
		stall_task = NULL;
		return ret;
	}
	torture_shuffle_task_register(stall_task);
	return 0;
}

/* Clean up after the CPU-stall kthread, if one was spawned. */
static void rcu_torture_stall_cleanup(void)
{
	if (stall_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_stall_task.");
	kthread_stop(stall_task);
	stall_task = NULL;
}
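/*
 * RCU barrier testing:  each of the n_barrier_cbs kthreads below posts one
 * callback per test phase.  The coordinating kthread flips barrier_phase,
 * waits until every callback has been posted, invokes cur_ops->cb_barrier(),
 * and then verifies that every posted callback has been invoked.
 */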
/* Callback function for RCU barrier testing. */
void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, 19);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    ACCESS_ONCE(barrier_phase)) != lastphase ||
			   kthread_should_stop() ||
			   fullstop != FULLSTOP_DONTSTOP);
		lastphase = newphase;
		smp_mb(); /* ensure barrier_phase load before ->call(). */
		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
			break;
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task stopping");
	torture_shutdown_absorb("rcu_torture_barrier_cbs");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	return 0;
}
/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		smp_mb(); /* Ensure barrier_phase after prior assignments. */
		barrier_phase = !barrier_phase;
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   kthread_should_stop() ||
			   fullstop != FULLSTOP_DONTSTOP);
		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			WARN_ON_ONCE(1);
		}
		n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier task stopping");
	torture_shutdown_absorb("rcu_torture_barrier");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	return 0;
}
/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs == 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
			GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
						   (void *)(long)i,
						   "rcu_torture_barrier_cbs");
		if (IS_ERR(barrier_cbs_tasks[i])) {
			ret = PTR_ERR(barrier_cbs_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
			barrier_cbs_tasks[i] = NULL;
			return ret;
		}
		torture_shuffle_task_register(barrier_cbs_tasks[i]);
	}
	barrier_task = kthread_run(rcu_torture_barrier, NULL,
				   "rcu_torture_barrier");
	if (IS_ERR(barrier_task)) {
		ret = PTR_ERR(barrier_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier");
		barrier_task = NULL;
		return ret;
	}
	torture_shuffle_task_register(barrier_task);
	return 0;
}
/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	if (barrier_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier task");
		kthread_stop(barrier_task);
		barrier_task = NULL;
	}
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++) {
			if (barrier_cbs_tasks[i] != NULL) {
				VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier_cbs task");
				kthread_stop(barrier_cbs_tasks[i]);
				barrier_cbs_tasks[i] = NULL;
			}
		}
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}
static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};
static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		pr_warn(/* but going down anyway, so... */
			"Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);

	torture_shuffle_cleanup(); /* Must be first task cleaned up. */
	rcu_torture_barrier_cleanup();
	rcu_torture_stall_cleanup();
	if (stutter_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;

	if (writer_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
	rcu_torture_onoff_cleanup();

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (n_online_successes != n_online_attempts ||
		 n_offline_successes != n_offline_attempts)
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	int retval;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
	};

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_TOROUT_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_create(rcu_torture_writer, NULL,
				     "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	torture_shuffle_task_register(writer_task);
	wake_up_process(writer_task);
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(fakewriter_tasks[i]);
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(reader_tasks[i]);
	}
	if (stat_interval > 0) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(stats_task);
	}
	if (test_no_idle_hz) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (firsterr)
			goto unwind;
	}

	/* Create the stutter thread */
	stutter_task = kthread_run(rcu_torture_stutter, NULL,
				   "rcu_torture_stutter");
	if (IS_ERR(stutter_task)) {
		firsterr = PTR_ERR(stutter_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create stutter");
		stutter_task = NULL;
		goto unwind;
	}
	torture_shuffle_task_register(stutter_task);

	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(fqs_task);
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
					       "rcu_torture_shutdown");
		if (IS_ERR(shutdown_task)) {
			firsterr = PTR_ERR(shutdown_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create shutdown");
			shutdown_task = NULL;
			goto unwind;
		}
		torture_shuffle_task_register(shutdown_task);
		wake_up_process(shutdown_task);
	}
	i = rcu_torture_onoff_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	register_reboot_notifier(&rcutorture_shutdown_nb);
	i = rcu_torture_stall_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	retval = rcu_torture_barrier_init();
	if (retval != 0) {
		firsterr = retval;
		goto unwind;
	}
	if (object_debug)
		rcu_test_debug_objects();
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}
module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);