/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *        Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
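
/*
 * Example usage (one plausible invocation; see the module parameters
 * below and Documentation/RCU/torture.txt for the full list):
 *
 *	modprobe rcutorture torture_type=rcu_bh nreaders=4 stat_interval=30
 *	... let the test run, watching the console for lines flagged "!!!" ...
 *	rmmod rcutorture
 */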
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;    /* # fake writer threads */
static int stat_interval;       /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static bool verbose;            /* Print more debug info. */
static bool test_no_idle_hz;    /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec) */
static int stutter = 5;         /* Start/stop testing interval (in sec) */
static int irqreader = 1;       /* RCU readers from irq (timers). */
static int fqs_duration;        /* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff;         /* Hold time within burst (us). */
static int fqs_stutter = 3;     /* Wait time between bursts (s). */
static int onoff_interval;      /* Wait time between CPU hotplugs, 0=disable. */
static int onoff_holdoff;       /* Seconds after boot before CPU hotplugs. */
static int shutdown_secs;       /* Shutdown time (s).  <=0 for no shutdown. */
static int stall_cpu;           /* CPU-stall duration (s).  0 for no stall. */
static int stall_cpu_holdoff = 10; /* Time to wait until stall (s).  */
static int test_boost = 1;      /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0644);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(onoff_interval, int, 0444);
MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
module_param(shutdown_secs, int, 0444);
MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
module_param(stall_cpu, int, 0444);
MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
module_param(stall_cpu_holdoff, int, 0444);
MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static struct task_struct *stall_task;

#define RCU_TORTURE_PIPE_LEN 10

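/*
 * One element of the torture pipeline.  rtort_rcu links the element
 * into RCU callback lists, rtort_pipe_count records how many grace
 * periods the element has passed through since it was last handed to
 * readers, rtort_free links it onto rcu_torture_freelist, and
 * rtort_mbtest is nonzero only while the element is live, letting
 * readers detect elements freed too soon (see n_rcu_torture_mberror).
 */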
struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
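
/*
 * Per-CPU histograms of reader observations: rcu_torture_count[i] counts
 * reads that found an element which had survived i grace-period stages,
 * and rcu_torture_batch[i] counts reads during which i grace periods
 * were seen to complete.  Any rcu_torture_count entry beyond stage 1
 * means a reader saw an element that should already have been freed.
 */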
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static long n_online_attempts;
static long n_online_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

static unsigned long shutdown_time;     /* jiffies to system shutdown. */
static unsigned long boost_starttime;   /* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);              /* protect setting boost_starttime */
                                        /*  and boost task create/destroy. */

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0     /* Normal operation. */
#define FULLSTOP_SHUTDOWN 1     /* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2     /* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
                           unsigned long unused2, void *unused3)
{
        mutex_lock(&fullstop_mutex);
        if (fullstop == FULLSTOP_DONTSTOP)
                fullstop = FULLSTOP_SHUTDOWN;
        else
                printk(KERN_WARNING /* but going down anyway, so... */
                       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
        mutex_unlock(&fullstop_mutex);
        return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
        if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
                printk(KERN_NOTICE
                       "rcutorture thread %s parking due to system shutdown\n",
                       title);
                schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
        }
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
        unsigned long rrs_state;
        long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
        if (--rrsp->rrs_count < 0) {
                rrsp->rrs_state += (unsigned long)local_clock();
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}
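
/*
 * Callers keep a per-thread rcu_random_state (see DEFINE_RCU_RANDOM)
 * and typically mask the result or take it modulo the desired range,
 * as the writer kthreads below do:
 *
 *	udelay(rcu_random(&rand) & 0x3ff);  (delay of 0-1023 microseconds)
 */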

static void
rcu_stutter_wait(char *title)
{
        while (stutter_pause_test || !rcutorture_runnable) {
                if (rcutorture_runnable)
                        schedule_timeout_interruptible(1);
                else
                        schedule_timeout_interruptible(round_jiffies_relative(HZ));
                rcutorture_shutdown_absorb(title);
        }
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*read_delay)(struct rcu_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferred_free)(struct rcu_torture *p);
        void (*sync)(void);
        void (*cb_barrier)(void);
        void (*fqs)(void);
        int (*stats)(char *page);
        int irq_capable;
        int can_boost;
        char *name;
};

static struct rcu_torture_ops *cur_ops;
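
/*
 * cur_ops is pointed at exactly one of the *_ops structures below during
 * module initialization, by matching the torture_type module parameter
 * against each candidate's ->name (a simplified sketch of that lookup):
 *
 *	for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
 *		if (strcmp(torture_type, torture_ops[i]->name) == 0)
 *			cur_ops = torture_ops[i];
 */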

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        const unsigned long shortdelay_us = 200;
        const unsigned long longdelay_ms = 50;

        /* We want a short delay sometimes to make a reader delay the grace
         * period, and we want a long delay occasionally to trigger
         * force_quiescent_state. */

        if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
                mdelay(longdelay_ms);
        if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
                udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
        if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
                preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
        return rcu_batches_completed();
}

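/*
 * RCU callback invoked once a grace period has elapsed.  Each invocation
 * advances the element one stage down the pipeline, re-posting it via
 * ->deferred_free() until it has aged through RCU_TORTURE_PIPE_LEN grace
 * periods, at which point it is returned to the freelist.
 */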
static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop != FULLSTOP_DONTSTOP) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
        return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
        .init           = NULL,
        .cleanup        = NULL,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
        .completed      = rcu_torture_completed,
        .deferred_free  = rcu_torture_deferred_free,
        .sync           = synchronize_rcu,
        .cb_barrier     = rcu_barrier,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .name           = "rcu"
};

static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        cur_ops->sync();
        list_add(&p->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                i = rp->rtort_pipe_count;
                if (i > RCU_TORTURE_PIPE_LEN)
                        i = RCU_TORTURE_PIPE_LEN;
                atomic_inc(&rcu_torture_wcount[i]);
                if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                        rp->rtort_mbtest = 0;
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,
        .readunlock     = rcu_torture_read_unlock,
        .completed      = rcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_rcu,
        .cb_barrier     = NULL,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .name           = "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_rcu_expedited,
        .cb_barrier     = NULL,
        .fqs            = rcu_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .can_boost      = rcu_can_boost(),
        .name           = "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
        return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
        .init           = NULL,
        .cleanup        = NULL,
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
        .completed      = rcu_bh_torture_completed,
        .deferred_free  = rcu_bh_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .cb_barrier     = rcu_barrier_bh,
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
        .completed      = rcu_bh_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_rcu_bh,
        .cb_barrier     = NULL,
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "rcu_bh_sync"
};

static struct rcu_torture_ops rcu_bh_expedited_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = rcu_bh_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = rcu_bh_torture_read_unlock,
        .completed      = rcu_bh_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_rcu_bh_expedited,
        .cb_barrier     = NULL,
        .fqs            = rcu_bh_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "rcu_bh_expedited"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
        init_srcu_struct(&srcu_ctl);
        rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
        synchronize_srcu(&srcu_ctl);
        cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
        return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
        else
                rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
        srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
        return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
        int cnt = 0;
        int cpu;
        int idx = srcu_ctl.completed & 0x1;

        cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
                       torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}

static struct rcu_torture_ops srcu_ops = {
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .completed      = srcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .cb_barrier     = NULL,
        .stats          = srcu_torture_stats,
        .name           = "srcu"
};

static int srcu_torture_read_lock_raw(void) __acquires(&srcu_ctl)
{
        return srcu_read_lock_raw(&srcu_ctl);
}

static void srcu_torture_read_unlock_raw(int idx) __releases(&srcu_ctl)
{
        srcu_read_unlock_raw(&srcu_ctl, idx);
}

static struct rcu_torture_ops srcu_raw_ops = {
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock_raw,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock_raw,
        .completed      = srcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = srcu_torture_synchronize,
        .cb_barrier     = NULL,
        .stats          = srcu_torture_stats,
        .name           = "srcu_raw"
};

static void srcu_torture_synchronize_expedited(void)
{
        synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
        .init           = srcu_torture_init,
        .cleanup        = srcu_torture_cleanup,
        .readlock       = srcu_torture_read_lock,
        .read_delay     = srcu_read_delay,
        .readunlock     = srcu_torture_read_unlock,
        .completed      = srcu_torture_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = srcu_torture_synchronize_expedited,
        .cb_barrier     = NULL,
        .stats          = srcu_torture_stats,
        .name           = "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_torture_read_unlock(int idx)
{
        preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sched_torture_deferred_free,
        .sync           = synchronize_sched,
        .cb_barrier     = rcu_barrier_sched,
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_sched,
        .cb_barrier     = NULL,
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .name           = "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
        .init           = rcu_sync_torture_init,
        .cleanup        = NULL,
        .readlock       = sched_torture_read_lock,
        .read_delay     = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock     = sched_torture_read_unlock,
        .completed      = rcu_no_completed,
        .deferred_free  = rcu_sync_torture_deferred_free,
        .sync           = synchronize_sched_expedited,
        .cb_barrier     = NULL,
        .fqs            = rcu_sched_force_quiescent_state,
        .stats          = NULL,
        .irq_capable    = 1,
        .name           = "sched_expedited"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

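/*
 * ->inflight is set to 1 immediately before posting the callback and
 * cleared by rcu_torture_boost_cb(), so a nonzero value means that a
 * callback is still awaiting invocation.
 */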
struct rcu_boost_inflight {
        struct rcu_head rcu;
        int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
        struct rcu_boost_inflight *rbip =
                container_of(head, struct rcu_boost_inflight, rcu);

        smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
        rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
        unsigned long call_rcu_time;
        unsigned long endtime;
        unsigned long oldstarttime;
        struct rcu_boost_inflight rbi = { .inflight = 0 };
        struct sched_param sp;

        VERBOSE_PRINTK_STRING("rcu_torture_boost started");

        /* Set real-time priority. */
        sp.sched_priority = 1;
        if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
                VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
                n_rcu_torture_boost_rterror++;
        }

        init_rcu_head_on_stack(&rbi.rcu);
        /* Each pass through the following loop does one boost-test cycle. */
        do {
                /* Wait for the next test interval. */
                oldstarttime = boost_starttime;
                while (ULONG_CMP_LT(jiffies, oldstarttime)) {
                        schedule_timeout_uninterruptible(1);
                        rcu_stutter_wait("rcu_torture_boost");
                        if (kthread_should_stop() ||
                            fullstop != FULLSTOP_DONTSTOP)
                                goto checkwait;
                }

                /* Do one boost-test interval. */
                endtime = oldstarttime + test_boost_duration * HZ;
                call_rcu_time = jiffies;
                while (ULONG_CMP_LT(jiffies, endtime)) {
                        /* If we don't have a callback in flight, post one. */
                        if (!rbi.inflight) {
                                smp_mb(); /* RCU core before ->inflight = 1. */
                                rbi.inflight = 1;
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
                                if (jiffies - call_rcu_time >
                                         test_boost_duration * HZ - HZ / 2) {
                                        VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
                                        n_rcu_torture_boost_failure++;
                                }
                                call_rcu_time = jiffies;
                        }
                        cond_resched();
                        rcu_stutter_wait("rcu_torture_boost");
                        if (kthread_should_stop() ||
                            fullstop != FULLSTOP_DONTSTOP)
                                goto checkwait;
                }

                /*
                 * Set the start time of the next test interval.
                 * Yes, this is vulnerable to long delays, but such
                 * delays simply cause a false negative for the next
                 * interval.  Besides, we are running at RT priority,
                 * so delays should be relatively rare.
                 */
                while (oldstarttime == boost_starttime &&
                       !kthread_should_stop()) {
                        if (mutex_trylock(&boost_mutex)) {
                                boost_starttime = jiffies +
                                                  test_boost_interval * HZ;
                                n_rcu_torture_boosts++;
                                mutex_unlock(&boost_mutex);
                                break;
                        }
                        schedule_timeout_uninterruptible(1);
                }

                /* Go do the stutter. */
checkwait:      rcu_stutter_wait("rcu_torture_boost");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

        /* Clean up and exit. */
        VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
        rcutorture_shutdown_absorb("rcu_torture_boost");
        while (!kthread_should_stop() || rbi.inflight)
                schedule_timeout_uninterruptible(1);
        smp_mb(); /* order accesses to ->inflight before stack-frame death. */
        destroy_rcu_head_on_stack(&rbi.rcu);
        return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
        unsigned long fqs_resume_time;
        int fqs_burst_remaining;

        VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
        do {
                fqs_resume_time = jiffies + fqs_stutter * HZ;
                while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
                       !kthread_should_stop()) {
                        schedule_timeout_interruptible(1);
                }
                fqs_burst_remaining = fqs_duration;
                while (fqs_burst_remaining > 0 &&
                       !kthread_should_stop()) {
                        cur_ops->fqs();
                        udelay(fqs_holdoff);
                        fqs_burst_remaining -= fqs_holdoff;
                }
                rcu_stutter_wait("rcu_torture_fqs");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
        rcutorture_shutdown_absorb("rcu_torture_fqs");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        long oldbatch = rcu_batches_completed();
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                rp = rcu_torture_alloc();
                if (rp == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_dereference_check(rcu_torture_current,
                                               current == writer_task);
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
                if (old_rp) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        cur_ops->deferred_free(old_rp);
                }
                rcutorture_record_progress(++rcu_torture_current_version);
                oldbatch = cur_ops->completed();
                rcu_stutter_wait("rcu_torture_writer");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
        rcutorture_shutdown_absorb("rcu_torture_writer");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
                cur_ops->sync();
                rcu_stutter_wait("rcu_torture_fakewriter");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
        rcutorture_shutdown_absorb("rcu_torture_fakewriter");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

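/*
 * Dump the ftrace buffer at most once per test run, the first time a
 * reader detects an anomaly, to avoid flooding the console with
 * duplicate dumps.
 */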
void rcutorture_trace_dump(void)
{
        static atomic_t beenhere = ATOMIC_INIT(0);

        if (atomic_read(&beenhere))
                return;
        if (atomic_xchg(&beenhere, 1) != 0)
                return;
        do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
        ftrace_dump(DUMP_ALL);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
        int idx;
        int completed;
        static DEFINE_RCU_RANDOM(rand);
        static DEFINE_SPINLOCK(rand_lock);
        struct rcu_torture *p;
        int pipe_count;

        idx = cur_ops->readlock();
        completed = cur_ops->completed();
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
                                  rcu_read_lock_sched_held() ||
                                  srcu_read_lock_held(&srcu_ctl));
        if (p == NULL) {
                /* Leave because rcu_torture_writer is not yet underway */
                cur_ops->readunlock(idx);
                return;
        }
        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
        if (p->rtort_mbtest == 0)
                atomic_inc(&n_rcu_torture_mberror);
        spin_lock(&rand_lock);
        cur_ops->read_delay(&rand);
        n_rcu_torture_timers++;
        spin_unlock(&rand_lock);
        preempt_disable();
        pipe_count = p->rtort_pipe_count;
        if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                pipe_count = RCU_TORTURE_PIPE_LEN;
        }
        if (pipe_count > 1)
                rcutorture_trace_dump();
        __this_cpu_inc(rcu_torture_count[pipe_count]);
        completed = cur_ops->completed() - completed;
        if (completed > RCU_TORTURE_PIPE_LEN) {
                /* Should not happen, but... */
                completed = RCU_TORTURE_PIPE_LEN;
        }
        __this_cpu_inc(rcu_torture_batch[completed]);
        preempt_enable();
        cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;
        struct timer_list t;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);
        if (irqreader && cur_ops->irq_capable)
                setup_timer_on_stack(&t, rcu_torture_timer, 0);

        do {
                if (irqreader && cur_ops->irq_capable) {
                        if (!timer_pending(&t))
                                mod_timer(&t, jiffies + 1);
                }
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
                p = rcu_dereference_check(rcu_torture_current,
                                          rcu_read_lock_bh_held() ||
                                          rcu_read_lock_sched_held() ||
                                          srcu_read_lock_held(&srcu_ctl));
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->read_delay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                if (pipe_count > 1)
                        rcutorture_trace_dump();
                __this_cpu_inc(rcu_torture_count[pipe_count]);
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                __this_cpu_inc(rcu_torture_batch[completed]);
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
                rcu_stutter_wait("rcu_torture_reader");
        } while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        rcutorture_shutdown_absorb("rcu_torture_reader");
        if (irqreader && cur_ops->irq_capable)
                del_timer_sync(&t);
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
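/*
 * Key to the one-line summary: rtc=current element, ver=version,
 * tfle=freelist empty, rta/rtaf=allocations/alloc failures, rtf=frees,
 * rtmbe=memory-barrier errors, rtbke/rtbre=boost kthread/RT-priority
 * errors, rtbf=boost failures, rtb=boosts, nt=timer-based reads, and
 * onoff=online successes/attempts:offline successes/attempts.
 */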
1108 static int
1109 rcu_torture_printk(char *page)
1110 {
1111         int cnt = 0;
1112         int cpu;
1113         int i;
1114         long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1115         long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
1116
1117         for_each_possible_cpu(cpu) {
1118                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1119                         pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
1120                         batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
1121                 }
1122         }
1123         for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
1124                 if (pipesummary[i] != 0)
1125                         break;
1126         }
1127         cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
1128         cnt += sprintf(&page[cnt],
1129                        "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
1130                        "rtmbe: %d rtbke: %ld rtbre: %ld "
1131                        "rtbf: %ld rtb: %ld nt: %ld "
1132                        "onoff: %ld/%ld:%ld/%ld",
1133                        rcu_torture_current,
1134                        rcu_torture_current_version,
1135                        list_empty(&rcu_torture_freelist),
1136                        atomic_read(&n_rcu_torture_alloc),
1137                        atomic_read(&n_rcu_torture_alloc_fail),
1138                        atomic_read(&n_rcu_torture_free),
1139                        atomic_read(&n_rcu_torture_mberror),
1140                        n_rcu_torture_boost_ktrerror,
1141                        n_rcu_torture_boost_rterror,
1142                        n_rcu_torture_boost_failure,
1143                        n_rcu_torture_boosts,
1144                        n_rcu_torture_timers,
1145                        n_online_successes,
1146                        n_online_attempts,
1147                        n_offline_successes,
1148                        n_offline_attempts);
1149         if (atomic_read(&n_rcu_torture_mberror) != 0 ||
1150             n_rcu_torture_boost_ktrerror != 0 ||
1151             n_rcu_torture_boost_rterror != 0 ||
1152             n_rcu_torture_boost_failure != 0)
1153                 cnt += sprintf(&page[cnt], " !!!");
1154         cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
1155         if (i > 1) {
1156                 cnt += sprintf(&page[cnt], "!!! ");
1157                 atomic_inc(&n_rcu_torture_error);
1158                 WARN_ON_ONCE(1);
1159         }
1160         cnt += sprintf(&page[cnt], "Reader Pipe: ");
1161         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1162                 cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
1163         cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
1164         cnt += sprintf(&page[cnt], "Reader Batch: ");
1165         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1166                 cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
1167         cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
1168         cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
1169         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1170                 cnt += sprintf(&page[cnt], " %d",
1171                                atomic_read(&rcu_torture_wcount[i]));
1172         }
1173         cnt += sprintf(&page[cnt], "\n");
1174         if (cur_ops->stats)
1175                 cnt += cur_ops->stats(&page[cnt]);
1176         return cnt;
1177 }
1178
1179 /*
1180  * Print torture statistics.  Caller must ensure that there is only
1181  * one call to this function at a given time!!!  This is normally
1182  * accomplished by relying on the module system to only have one copy
1183  * of the module loaded, and then by giving the rcu_torture_stats
1184  * kthread full control (or the init/cleanup functions when rcu_torture_stats
1185  * thread is not running).
1186  */
1187 static void
1188 rcu_torture_stats_print(void)
1189 {
1190         int cnt;
1191
1192         cnt = rcu_torture_printk(printk_buf);
1193         printk(KERN_ALERT "%s", printk_buf);
1194 }
1195
1196 /*
1197  * Periodically prints torture statistics, if periodic statistics printing
1198  * was specified via the stat_interval module parameter.
1199  *
1200  * No need to worry about fullstop here, since this one doesn't reference
1201  * volatile state or register callbacks.
1202  */
1203 static int
1204 rcu_torture_stats(void *arg)
1205 {
1206         VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
1207         do {
1208                 schedule_timeout_interruptible(stat_interval * HZ);
1209                 rcu_torture_stats_print();
1210                 rcutorture_shutdown_absorb("rcu_torture_stats");
1211         } while (!kthread_should_stop());
1212         VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
1213         return 0;
1214 }
1215
1216 static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */
1217
1218 /* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
1219  * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
1220  */
1221 static void rcu_torture_shuffle_tasks(void)
1222 {
1223         int i;
1224
1225         cpumask_setall(shuffle_tmp_mask);
1226         get_online_cpus();
1227
1228         /* No point in shuffling if there is only one online CPU (ex: UP) */
1229         if (num_online_cpus() == 1) {
1230                 put_online_cpus();
1231                 return;
1232         }
1233
1234         if (rcu_idle_cpu != -1)
1235                 cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
1236
1237         set_cpus_allowed_ptr(current, shuffle_tmp_mask);
1238
1239         if (reader_tasks) {
1240                 for (i = 0; i < nrealreaders; i++)
1241                         if (reader_tasks[i])
1242                                 set_cpus_allowed_ptr(reader_tasks[i],
1243                                                      shuffle_tmp_mask);
1244         }
1245
1246         if (fakewriter_tasks) {
1247                 for (i = 0; i < nfakewriters; i++)
1248                         if (fakewriter_tasks[i])
1249                                 set_cpus_allowed_ptr(fakewriter_tasks[i],
1250                                                      shuffle_tmp_mask);
1251         }
1252
1253         if (writer_task)
1254                 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
1255
1256         if (stats_task)
1257                 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
1258
1259         if (rcu_idle_cpu == -1)
1260                 rcu_idle_cpu = num_online_cpus() - 1;
1261         else
1262                 rcu_idle_cpu--;
1263
1264         put_online_cpus();
1265 }
1266
1267 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
1268  * system to become idle at a time and cut off its timer ticks. This is meant
1269  * to test the support for such tickless idle CPU in RCU.
1270  */
1271 static int
1272 rcu_torture_shuffle(void *arg)
1273 {
1274         VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
1275         do {
1276                 schedule_timeout_interruptible(shuffle_interval * HZ);
1277                 rcu_torture_shuffle_tasks();
1278                 rcutorture_shutdown_absorb("rcu_torture_shuffle");
1279         } while (!kthread_should_stop());
1280         VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
1281         return 0;
1282 }
1283
1284 /* Cause the rcutorture test to "stutter", starting and stopping all
1285  * threads periodically.
1286  */
1287 static int
1288 rcu_torture_stutter(void *arg)
1289 {
1290         VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
1291         do {
1292                 schedule_timeout_interruptible(stutter * HZ);
1293                 stutter_pause_test = 1;
1294                 if (!kthread_should_stop())
1295                         schedule_timeout_interruptible(stutter * HZ);
1296                 stutter_pause_test = 0;
1297                 rcutorture_shutdown_absorb("rcu_torture_stutter");
1298         } while (!kthread_should_stop());
1299         VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
1300         return 0;
1301 }
1302
1303 static inline void
1304 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
1305 {
1306         printk(KERN_ALERT "%s" TORTURE_FLAG
1307                 "--- %s: nreaders=%d nfakewriters=%d "
1308                 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
1309                 "shuffle_interval=%d stutter=%d irqreader=%d "
1310                 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
1311                 "test_boost=%d/%d test_boost_interval=%d "
1312                 "test_boost_duration=%d shutdown_secs=%d "
1313                 "onoff_interval=%d onoff_holdoff=%d\n",
1314                 torture_type, tag, nrealreaders, nfakewriters,
1315                 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
1316                 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
1317                 test_boost, cur_ops->can_boost,
1318                 test_boost_interval, test_boost_duration, shutdown_secs,
1319                 onoff_interval, onoff_holdoff);
1320 }
1321
1322 static struct notifier_block rcutorture_shutdown_nb = {
1323         .notifier_call = rcutorture_shutdown_notify,
1324 };
1325
1326 static void rcutorture_booster_cleanup(int cpu)
1327 {
1328         struct task_struct *t;
1329
1330         if (boost_tasks[cpu] == NULL)
1331                 return;
1332         mutex_lock(&boost_mutex);
1333         VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
1334         t = boost_tasks[cpu];
1335         boost_tasks[cpu] = NULL;
1336         mutex_unlock(&boost_mutex);
1337
1338         /* This must be outside of the mutex, otherwise deadlock! */
1339         kthread_stop(t);
1340 }
1341
1342 static int rcutorture_booster_init(int cpu)
1343 {
1344         int retval;
1345
1346         if (boost_tasks[cpu] != NULL)
1347                 return 0;  /* Already created, nothing more to do. */
1348
1349         /* Don't allow time recalculation while creating a new task. */
1350         mutex_lock(&boost_mutex);
1351         VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
1352         boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
1353                                                   cpu_to_node(cpu),
1354                                                   "rcu_torture_boost");
1355         if (IS_ERR(boost_tasks[cpu])) {
1356                 retval = PTR_ERR(boost_tasks[cpu]);
1357                 VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
1358                 n_rcu_torture_boost_ktrerror++;
1359                 boost_tasks[cpu] = NULL;
1360                 mutex_unlock(&boost_mutex);
1361                 return retval;
1362         }
1363         kthread_bind(boost_tasks[cpu], cpu);
1364         wake_up_process(boost_tasks[cpu]);
1365         mutex_unlock(&boost_mutex);
1366         return 0;
1367 }
1368
1369 /*
1370  * Cause the rcutorture test to shutdown the system after the test has
1371  * run for the time specified by the shutdown_secs module parameter.
1372  */
1373 static int
1374 rcu_torture_shutdown(void *arg)
1375 {
1376         long delta;
1377         unsigned long jiffies_snap;
1378
1379         VERBOSE_PRINTK_STRING("rcu_torture_shutdown task started");
1380         jiffies_snap = ACCESS_ONCE(jiffies);
1381         while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
1382                !kthread_should_stop()) {
1383                 delta = shutdown_time - jiffies_snap;
1384                 if (verbose)
1385                         printk(KERN_ALERT "%s" TORTURE_FLAG
1386                                "rcu_torture_shutdown task: %lu "
1387                                "jiffies remaining\n",
1388                                torture_type, delta);
1389                 schedule_timeout_interruptible(delta);
1390                 jiffies_snap = ACCESS_ONCE(jiffies);
1391         }
1392         if (kthread_should_stop()) {
1393                 VERBOSE_PRINTK_STRING("rcu_torture_shutdown task stopping");
1394                 return 0;
1395         }
1396
1397         /* OK, shut down the system. */
1398
1399         VERBOSE_PRINTK_STRING("rcu_torture_shutdown task shutting down system");
1400         shutdown_task = NULL;   /* Avoid self-kill deadlock. */
1401         rcu_torture_cleanup();  /* Get the success/failure message. */
1402         kernel_power_off();     /* Shut down the system. */
1403         return 0;
1404 }
1405
1406 #ifdef CONFIG_HOTPLUG_CPU
1407
1408 /*
1409  * Execute random CPU-hotplug operations at the interval specified
1410  * by the onoff_interval.
1411  */
1412 static int __cpuinit
1413 rcu_torture_onoff(void *arg)
1414 {
1415         int cpu;
1416         int maxcpu = -1;
1417         DEFINE_RCU_RANDOM(rand);
1418
1419         VERBOSE_PRINTK_STRING("rcu_torture_onoff task started");
1420         for_each_online_cpu(cpu)
1421                 maxcpu = cpu;
1422         WARN_ON(maxcpu < 0);
1423         if (onoff_holdoff > 0) {
1424                 VERBOSE_PRINTK_STRING("rcu_torture_onoff begin holdoff");
1425                 schedule_timeout_interruptible(onoff_holdoff * HZ);
1426                 VERBOSE_PRINTK_STRING("rcu_torture_onoff end holdoff");
1427         }
1428         while (!kthread_should_stop()) {
1429                 cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1);
1430                 if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
1431                         if (verbose)
1432                                 printk(KERN_ALERT "%s" TORTURE_FLAG
1433                                        "rcu_torture_onoff task: offlining %d\n",
1434                                        torture_type, cpu);
1435                         n_offline_attempts++;
1436                         if (cpu_down(cpu) == 0) {
1437                                 if (verbose)
1438                                         printk(KERN_ALERT "%s" TORTURE_FLAG
1439                                                "rcu_torture_onoff task: "
1440                                                "offlined %d\n",
1441                                                torture_type, cpu);
1442                                 n_offline_successes++;
1443                         }
1444                 } else if (cpu_is_hotpluggable(cpu)) {
1445                         if (verbose)
1446                                 printk(KERN_ALERT "%s" TORTURE_FLAG
1447                                        "rcu_torture_onoff task: onlining %d\n",
1448                                        torture_type, cpu);
1449                         n_online_attempts++;
1450                         if (cpu_up(cpu) == 0) {
1451                                 if (verbose)
1452                                         printk(KERN_ALERT "%s" TORTURE_FLAG
1453                                                "rcu_torture_onoff task: "
1454                                                "onlined %d\n",
1455                                                torture_type, cpu);
1456                                 n_online_successes++;
1457                         }
1458                 }
1459                 schedule_timeout_interruptible(onoff_interval * HZ);
1460         }
1461         VERBOSE_PRINTK_STRING("rcu_torture_onoff task stopping");
1462         return 0;
1463 }
1464
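/* Spawn CPU-hotplug kthread, if onoff_interval specified. */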
1465 static int __cpuinit
1466 rcu_torture_onoff_init(void)
1467 {
1468         int ret;
1469
1470         if (onoff_interval <= 0)
1471                 return 0;
1472         onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
1473         if (IS_ERR(onoff_task)) {
1474                 ret = PTR_ERR(onoff_task);
1475                 onoff_task = NULL;
1476                 return ret;
1477         }
1478         return 0;
1479 }
1480
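/* Clean up after the CPU-hotplug kthread, if one was spawned. */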
1481 static void rcu_torture_onoff_cleanup(void)
1482 {
1483         if (onoff_task == NULL)
1484                 return;
1485         VERBOSE_PRINTK_STRING("Stopping rcu_torture_onoff task");
1486         kthread_stop(onoff_task);
1487 }
1488
1489 #else /* #ifdef CONFIG_HOTPLUG_CPU */
1490
1491 static int rcu_torture_onoff_init(void)
1492 {
1493         return 0;
1494 }
1495
1496 static void rcu_torture_onoff_cleanup(void)
1497 {
1498 }
1499
1500 #endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
1501
1502 /*
1503  * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
1504  * induces a CPU stall for the time specified by stall_cpu.
1505  */
1506 static int __cpuinit rcu_torture_stall(void *args)
1507 {
1508         unsigned long stop_at;
1509
1510         VERBOSE_PRINTK_STRING("rcu_torture_stall task started");
1511         if (stall_cpu_holdoff > 0) {
1512                 VERBOSE_PRINTK_STRING("rcu_torture_stall begin holdoff");
1513                 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
1514                 VERBOSE_PRINTK_STRING("rcu_torture_stall end holdoff");
1515         }
1516         if (!kthread_should_stop()) {
1517                 stop_at = get_seconds() + stall_cpu;
1518                 /* RCU CPU stall is expected behavior in the following code. */
1519                 printk(KERN_ALERT "rcu_torture_stall start.\n");
1520                 rcu_read_lock();
1521                 preempt_disable();
1522                 while (ULONG_CMP_LT(get_seconds(), stop_at))
1523                         continue;  /* Induce RCU CPU stall warning. */
1524                 preempt_enable();
1525                 rcu_read_unlock();
1526                 printk(KERN_ALERT "rcu_torture_stall end.\n");
1527         }
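        /* Park here until cleanup (or a system shutdown) stops this kthread. */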
1528         rcutorture_shutdown_absorb("rcu_torture_stall");
1529         while (!kthread_should_stop())
1530                 schedule_timeout_interruptible(10 * HZ);
1531         return 0;
1532 }
1533
1534 /* Spawn CPU-stall kthread, if stall_cpu specified. */
1535 static int __init rcu_torture_stall_init(void)
1536 {
1537         int ret;
1538
1539         if (stall_cpu <= 0)
1540                 return 0;
1541         stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
1542         if (IS_ERR(stall_task)) {
1543                 ret = PTR_ERR(stall_task);
1544                 stall_task = NULL;
1545                 return ret;
1546         }
1547         return 0;
1548 }
1549
1550 /* Clean up after the CPU-stall kthread, if one was spawned. */
1551 static void rcu_torture_stall_cleanup(void)
1552 {
1553         if (stall_task == NULL)
1554                 return;
1555         VERBOSE_PRINTK_STRING("Stopping rcu_torture_stall task");
1556         kthread_stop(stall_task);
1557 }
1558
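/*
 * CPU-hotplug notifier: keep the per-CPU boost kthreads in step with
 * CPU online/offline transitions.
 */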
1559 static int rcutorture_cpu_notify(struct notifier_block *self,
1560                                  unsigned long action, void *hcpu)
1561 {
1562         long cpu = (long)hcpu;
1563
1564         switch (action) {
1565         case CPU_ONLINE:
1566         case CPU_DOWN_FAILED:
1567                 (void)rcutorture_booster_init(cpu);
1568                 break;
1569         case CPU_DOWN_PREPARE:
1570                 rcutorture_booster_cleanup(cpu);
1571                 break;
1572         default:
1573                 break;
1574         }
1575         return NOTIFY_OK;
1576 }
1577
1578 static struct notifier_block rcutorture_cpu_nb = {
1579         .notifier_call = rcutorture_cpu_notify,
1580 };
1581
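/*
 * Tear down the test: stop all torture kthreads, wait for any pending
 * RCU callbacks via cur_ops->cb_barrier(), then print the final
 * statistics and the success/failure verdict.
 */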
1582 static void
1583 rcu_torture_cleanup(void)
1584 {
1585         int i;
1586
1587         mutex_lock(&fullstop_mutex);
1588         rcutorture_record_test_transition();
1589         if (fullstop == FULLSTOP_SHUTDOWN) {
1590                 printk(KERN_WARNING /* but going down anyway, so... */
1591                        "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
1592                 mutex_unlock(&fullstop_mutex);
1593                 schedule_timeout_uninterruptible(10);
1594                 if (cur_ops->cb_barrier != NULL)
1595                         cur_ops->cb_barrier();
1596                 return;
1597         }
1598         fullstop = FULLSTOP_RMMOD;
1599         mutex_unlock(&fullstop_mutex);
1600         unregister_reboot_notifier(&rcutorture_shutdown_nb);
1601         rcu_torture_stall_cleanup();
1602         if (stutter_task) {
1603                 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
1604                 kthread_stop(stutter_task);
1605         }
1606         stutter_task = NULL;
1607         if (shuffler_task) {
1608                 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
1609                 kthread_stop(shuffler_task);
1610                 free_cpumask_var(shuffle_tmp_mask);
1611         }
1612         shuffler_task = NULL;
1613
1614         if (writer_task) {
1615                 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
1616                 kthread_stop(writer_task);
1617         }
1618         writer_task = NULL;
1619
1620         if (reader_tasks) {
1621                 for (i = 0; i < nrealreaders; i++) {
1622                         if (reader_tasks[i]) {
1623                                 VERBOSE_PRINTK_STRING(
1624                                         "Stopping rcu_torture_reader task");
1625                                 kthread_stop(reader_tasks[i]);
1626                         }
1627                         reader_tasks[i] = NULL;
1628                 }
1629                 kfree(reader_tasks);
1630                 reader_tasks = NULL;
1631         }
1632         rcu_torture_current = NULL;
1633
1634         if (fakewriter_tasks) {
1635                 for (i = 0; i < nfakewriters; i++) {
1636                         if (fakewriter_tasks[i]) {
1637                                 VERBOSE_PRINTK_STRING(
1638                                         "Stopping rcu_torture_fakewriter task");
1639                                 kthread_stop(fakewriter_tasks[i]);
1640                         }
1641                         fakewriter_tasks[i] = NULL;
1642                 }
1643                 kfree(fakewriter_tasks);
1644                 fakewriter_tasks = NULL;
1645         }
1646
1647         if (stats_task) {
1648                 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
1649                 kthread_stop(stats_task);
1650         }
1651         stats_task = NULL;
1652
1653         if (fqs_task) {
1654                 VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
1655                 kthread_stop(fqs_task);
1656         }
1657         fqs_task = NULL;
1658         if ((test_boost == 1 && cur_ops->can_boost) ||
1659             test_boost == 2) {
1660                 unregister_cpu_notifier(&rcutorture_cpu_nb);
1661                 for_each_possible_cpu(i)
1662                         rcutorture_booster_cleanup(i);
1663         }
1664         if (shutdown_task != NULL) {
1665                 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shutdown task");
1666                 kthread_stop(shutdown_task);
1667         }
1668         rcu_torture_onoff_cleanup();
1669
1670         /* Wait for all RCU callbacks to fire.  */
1671
1672         if (cur_ops->cb_barrier != NULL)
1673                 cur_ops->cb_barrier();
1674
1675         rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
1676
1677         if (cur_ops->cleanup)
1678                 cur_ops->cleanup();
1679         if (atomic_read(&n_rcu_torture_error))
1680                 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
1681         else if (n_online_successes != n_online_attempts ||
1682                  n_offline_successes != n_offline_attempts)
1683                 rcu_torture_print_module_parms(cur_ops,
1684                                                "End of test: RCU_HOTPLUG");
1685         else
1686                 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
1687 }
1688
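/*
 * Bring up the test: select the ops vector matching torture_type,
 * initialize the free list and statistics, then spawn the requested
 * kthreads, unwinding via rcu_torture_cleanup() on any failure.
 */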
1689 static int __init
1690 rcu_torture_init(void)
1691 {
1692         int i;
1693         int cpu;
1694         int firsterr = 0;
1695         static struct rcu_torture_ops *torture_ops[] =
1696                 { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
1697                   &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
1698                   &srcu_ops, &srcu_raw_ops, &srcu_expedited_ops,
1699                   &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
1700
1701         mutex_lock(&fullstop_mutex);
1702
1703         /* Process args and tell the world that the torturer is on the job. */
1704         for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
1705                 cur_ops = torture_ops[i];
1706                 if (strcmp(torture_type, cur_ops->name) == 0)
1707                         break;
1708         }
1709         if (i == ARRAY_SIZE(torture_ops)) {
1710                 printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
1711                        torture_type);
1712                 printk(KERN_ALERT "rcu-torture types:");
1713                 for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
1714                         printk(KERN_ALERT " %s", torture_ops[i]->name);
1715                 printk(KERN_ALERT "\n");
1716                 mutex_unlock(&fullstop_mutex);
1717                 return -EINVAL;
1718         }
1719         if (cur_ops->fqs == NULL && fqs_duration != 0) {
1720                 printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
1721                                   "fqs_duration, fqs disabled.\n");
1722                 fqs_duration = 0;
1723         }
1724         if (cur_ops->init)
1725                 cur_ops->init(); /* no "goto unwind" prior to this point!!! */
1726
1727         if (nreaders >= 0)
1728                 nrealreaders = nreaders;
1729         else
1730                 nrealreaders = 2 * num_online_cpus();
1731         rcu_torture_print_module_parms(cur_ops, "Start of test");
1732         fullstop = FULLSTOP_DONTSTOP;
1733
1734         /* Set up the freelist. */
1735
1736         INIT_LIST_HEAD(&rcu_torture_freelist);
1737         for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
1738                 rcu_tortures[i].rtort_mbtest = 0;
1739                 list_add_tail(&rcu_tortures[i].rtort_free,
1740                               &rcu_torture_freelist);
1741         }
1742
1743         /* Initialize the statistics so that each run gets its own numbers. */
1744
1745         rcu_torture_current = NULL;
1746         rcu_torture_current_version = 0;
1747         atomic_set(&n_rcu_torture_alloc, 0);
1748         atomic_set(&n_rcu_torture_alloc_fail, 0);
1749         atomic_set(&n_rcu_torture_free, 0);
1750         atomic_set(&n_rcu_torture_mberror, 0);
1751         atomic_set(&n_rcu_torture_error, 0);
1752         n_rcu_torture_boost_ktrerror = 0;
1753         n_rcu_torture_boost_rterror = 0;
1754         n_rcu_torture_boost_failure = 0;
1755         n_rcu_torture_boosts = 0;
1756         for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
1757                 atomic_set(&rcu_torture_wcount[i], 0);
1758         for_each_possible_cpu(cpu) {
1759                 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
1760                         per_cpu(rcu_torture_count, cpu)[i] = 0;
1761                         per_cpu(rcu_torture_batch, cpu)[i] = 0;
1762                 }
1763         }
1764
1765         /* Start up the kthreads. */
1766
1767         VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
1768         writer_task = kthread_run(rcu_torture_writer, NULL,
1769                                   "rcu_torture_writer");
1770         if (IS_ERR(writer_task)) {
1771                 firsterr = PTR_ERR(writer_task);
1772                 VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
1773                 writer_task = NULL;
1774                 goto unwind;
1775         }
1776         fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
1777                                    GFP_KERNEL);
1778         if (fakewriter_tasks == NULL) {
1779                 VERBOSE_PRINTK_ERRSTRING("out of memory");
1780                 firsterr = -ENOMEM;
1781                 goto unwind;
1782         }
1783         for (i = 0; i < nfakewriters; i++) {
1784                 VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
1785                 fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
1786                                                   "rcu_torture_fakewriter");
1787                 if (IS_ERR(fakewriter_tasks[i])) {
1788                         firsterr = PTR_ERR(fakewriter_tasks[i]);
1789                         VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
1790                         fakewriter_tasks[i] = NULL;
1791                         goto unwind;
1792                 }
1793         }
1794         reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
1795                                GFP_KERNEL);
1796         if (reader_tasks == NULL) {
1797                 VERBOSE_PRINTK_ERRSTRING("out of memory");
1798                 firsterr = -ENOMEM;
1799                 goto unwind;
1800         }
1801         for (i = 0; i < nrealreaders; i++) {
1802                 VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
1803                 reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
1804                                               "rcu_torture_reader");
1805                 if (IS_ERR(reader_tasks[i])) {
1806                         firsterr = PTR_ERR(reader_tasks[i]);
1807                         VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
1808                         reader_tasks[i] = NULL;
1809                         goto unwind;
1810                 }
1811         }
1812         if (stat_interval > 0) {
1813                 VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
1814                 stats_task = kthread_run(rcu_torture_stats, NULL,
1815                                         "rcu_torture_stats");
1816                 if (IS_ERR(stats_task)) {
1817                         firsterr = PTR_ERR(stats_task);
1818                         VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
1819                         stats_task = NULL;
1820                         goto unwind;
1821                 }
1822         }
1823         if (test_no_idle_hz) {
1824                 rcu_idle_cpu = num_online_cpus() - 1;
1825
1826                 if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
1827                         firsterr = -ENOMEM;
1828                         VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
1829                         goto unwind;
1830                 }
1831
1832                 /* Create the shuffler thread */
1833                 shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
1834                                           "rcu_torture_shuffle");
1835                 if (IS_ERR(shuffler_task)) {
1836                         free_cpumask_var(shuffle_tmp_mask);
1837                         firsterr = PTR_ERR(shuffler_task);
1838                         VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
1839                         shuffler_task = NULL;
1840                         goto unwind;
1841                 }
1842         }
1843         if (stutter < 0)
1844                 stutter = 0;
1845         if (stutter) {
1846                 /* Create the stutter thread */
1847                 stutter_task = kthread_run(rcu_torture_stutter, NULL,
1848                                           "rcu_torture_stutter");
1849                 if (IS_ERR(stutter_task)) {
1850                         firsterr = PTR_ERR(stutter_task);
1851                         VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
1852                         stutter_task = NULL;
1853                         goto unwind;
1854                 }
1855         }
1856         if (fqs_duration < 0)
1857                 fqs_duration = 0;
1858         if (fqs_duration) {
1859                 /* Create the fqs thread */
1860                 fqs_task = kthread_run(rcu_torture_fqs, NULL,
1861                                        "rcu_torture_fqs");
1862                 if (IS_ERR(fqs_task)) {
1863                         firsterr = PTR_ERR(fqs_task);
1864                         VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
1865                         fqs_task = NULL;
1866                         goto unwind;
1867                 }
1868         }
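        /* Clamp boost parameters, then spawn boost kthreads if requested. */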
1869         if (test_boost_interval < 1)
1870                 test_boost_interval = 1;
1871         if (test_boost_duration < 2)
1872                 test_boost_duration = 2;
1873         if ((test_boost == 1 && cur_ops->can_boost) ||
1874             test_boost == 2) {
1875                 int retval;
1876
1877                 boost_starttime = jiffies + test_boost_interval * HZ;
1878                 register_cpu_notifier(&rcutorture_cpu_nb);
1879                 for_each_possible_cpu(i) {
1880                         if (cpu_is_offline(i))
1881                                 continue;  /* Heuristic: CPU can go offline. */
1882                         retval = rcutorture_booster_init(i);
1883                         if (retval < 0) {
1884                                 firsterr = retval;
1885                                 goto unwind;
1886                         }
1887                 }
1888         }
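        /* Spawn system-shutdown kthread, if shutdown_secs specified. */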
1889         if (shutdown_secs > 0) {
1890                 shutdown_time = jiffies + shutdown_secs * HZ;
1891                 shutdown_task = kthread_run(rcu_torture_shutdown, NULL,
1892                                             "rcu_torture_shutdown");
1893                 if (IS_ERR(shutdown_task)) {
1894                         firsterr = PTR_ERR(shutdown_task);
1895                         VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown");
1896                         shutdown_task = NULL;
1897                         goto unwind;
1898                 }
1899         }
1900         rcu_torture_onoff_init();
1901         register_reboot_notifier(&rcutorture_shutdown_nb);
1902         rcu_torture_stall_init();
1903         rcutorture_record_test_transition();
1904         mutex_unlock(&fullstop_mutex);
1905         return 0;
1906
1907 unwind:
1908         mutex_unlock(&fullstop_mutex);
1909         rcu_torture_cleanup();
1910         return firsterr;
1911 }
1912
1913 module_init(rcu_torture_init);
1914 module_exit(rcu_torture_cleanup);