/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	    Josh Triplett <josh@freedesktop.org>
 *
 * See also: Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
	      "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
static int stutter = 5;		/* Start/stop testing interval (in sec) */
static int irqreader = 1;	/* RCU readers from irq (timers). */
static int fqs_duration = 0;	/* Duration of bursts (us), 0 to disable. */
static int fqs_holdoff = 0;	/* Hold time within burst (us). */
static int fqs_stutter = 3;	/* Wait time between bursts (s). */
static int test_boost = 1;	/* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(stutter, int, 0444);
MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
module_param(irqreader, int, 0444);
MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
module_param(fqs_duration, int, 0444);
MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
module_param(test_boost_interval, int, 0444);
MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");

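/*
 * Example usage (a hypothetical invocation; all of the parameters above
 * are read-only once the module is loaded):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=8 stat_interval=30
 *
 * Progress and statistics then appear in the kernel log, each line
 * tagged "<torture_type>-torture:".
 */
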
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];

#define RCU_TORTURE_PIPE_LEN 10

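/*
 * One element of torture.  Each element migrates between the freelist
 * and the RCU-protected rcu_torture_current pointer.  rtort_pipe_count
 * records how many grace periods have elapsed since the element left
 * readers' view, and rtort_mbtest is nonzero only while the element is
 * live, letting readers detect memory-ordering bugs.
 */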
struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
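/*
 * rcutorture_runnable is deliberately non-static so that the test can
 * be paused (0) and resumed (1) at run time from outside this module;
 * rcu_stutter_wait() below polls it.
 */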

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */

#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
static int fullstop = FULLSTOP_RMMOD;
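/* fullstop remains FULLSTOP_RMMOD until rcu_torture_init() has the test
 * fully set up; the shutdown notifier treats any value other than
 * FULLSTOP_DONTSTOP as an rmmod already in progress. */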
/*
 * Protect fullstop transitions and spawning of kthreads.
 */
static DEFINE_MUTEX(fullstop_mutex);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
static void rcutorture_shutdown_absorb(char *title)
{
	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		printk(KERN_NOTICE
		       "rcutorture thread %s parking due to system shutdown\n",
		       title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from local_clock().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	if (--rrsp->rrs_count < 0) {
		rrsp->rrs_state += (unsigned long)local_clock();
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}

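/*
 * Block while the test is stuttered or rcutorture_runnable is clear,
 * absorbing this kthread if a system shutdown begins meanwhile.
 */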
static void
rcu_stutter_wait(char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		rcutorture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);	/* Per-flavor setup; may be NULL. */
	void (*cleanup)(void);	/* Per-flavor teardown; may be NULL. */
	int (*readlock)(void);	/* Enter read-side critical section. */
	void (*read_delay)(struct rcu_random_state *rrsp);
				/* Delay within a critical section. */
	void (*readunlock)(int idx);	/* Exit read-side critical section. */
	int (*completed)(void);	/* Count of grace periods completed. */
	void (*deferred_free)(struct rcu_torture *p);
				/* Free p after a grace period elapses. */
	void (*sync)(void);	/* Wait for a grace period. */
	void (*cb_barrier)(void);	/* Wait for callbacks; may be NULL. */
	void (*fqs)(void);	/* Force quiescent states; may be NULL. */
	int (*stats)(char *page);	/* Dump flavor stats; may be NULL. */
	int irq_capable;	/* Readers may run from irq (timer) handlers. */
	int can_boost;		/* Flavor works with priority boosting. */
	char *name;		/* Matched against the torture_type parameter. */
};

static struct rcu_torture_ops *cur_ops;	/* Flavor currently under test. */

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(rcu_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(rcu_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() && !(rcu_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferred_free(rp);
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

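/*
 * A new RCU flavor is wired in by defining another ops vector like
 * rcu_ops above and listing it in torture_ops[] in rcu_torture_init().
 * A minimal sketch (hypothetical "foo" flavor; readlock, read_delay,
 * readunlock, completed, deferred_free, sync, and name are invoked
 * unconditionally by the test, so they must be filled in; the rest
 * may be NULL or zero):
 *
 *	static struct rcu_torture_ops foo_ops = {
 *		.init		= rcu_sync_torture_init,
 *		.readlock	= foo_torture_read_lock,
 *		.read_delay	= rcu_read_delay,
 *		.readunlock	= foo_torture_read_unlock,
 *		.completed	= rcu_no_completed,
 *		.deferred_free	= rcu_sync_torture_deferred_free,
 *		.sync		= synchronize_foo,
 *		.name		= "foo"
 *	};
 *
 * Note that .init must be rcu_sync_torture_init whenever .deferred_free
 * is rcu_sync_torture_deferred_free, which uses rcu_torture_removed.
 */
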
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_sync"
};

static struct rcu_torture_ops rcu_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_rcu_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu_expedited"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

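/*
 * Wait for all pre-existing rcu_bh readers to complete by posting a
 * callback and blocking until that callback is invoked.
 */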
static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= rcu_bh_torture_synchronize,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= rcu_bh_torture_synchronize,
	.cb_barrier	= NULL,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;	/* Current counter pair. */

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};
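/* Note the absence of .fqs and .irq_capable: rcu_torture_init()
 * therefore forces fqs_duration to zero for srcu, and readers never
 * run from the timer handler. */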

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_expedited_ops = {
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= srcu_torture_synchronize_expedited,
	.cb_barrier	= NULL,
	.stats		= srcu_torture_stats,
	.name		= "srcu_expedited"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= sched_torture_synchronize,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

static struct rcu_torture_ops sched_sync_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= sched_torture_synchronize,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.name		= "sched_sync"
};

static struct rcu_torture_ops sched_expedited_ops = {
	.init		= rcu_sync_torture_init,
	.cleanup	= NULL,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sync_torture_deferred_free,
	.sync		= synchronize_sched_expedited,
	.cb_barrier	= NULL,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched_expedited"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};
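/* ->inflight is nonzero while the callback posted against ->rcu has not
 * yet been invoked; rcu_torture_boost() spins until it clears. */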

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_PRINTK_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_PRINTK_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		/* Wrap-safe: loop while jiffies < oldstarttime. */
		while (jiffies - oldstarttime > ULONG_MAX / 2) {
			schedule_timeout_uninterruptible(1);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		/* Wrap-safe: loop while jiffies < endtime. */
		while (jiffies - endtime > ULONG_MAX / 2) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_PRINTK_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_PRINTK_STRING("rcu_torture_boost task stopping");
	destroy_rcu_head_on_stack(&rbi.rcu);
	rcutorture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_PRINTK_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		/* Wrap-safe: sleep while jiffies < fqs_resume_time. */
		while (jiffies - fqs_resume_time > LONG_MAX) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_fqs task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	long oldbatch = rcu_batches_completed();
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferred_free(old_rp);
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		oldbatch = cur_ops->completed();
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	rcutorture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	static DEFINE_RCU_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_held() ||
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = cur_ops->completed() - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1; otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_held() ||
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	rcutorture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
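/*
 * Key to the summary line below: "rtc" is the current torture element
 * and "ver" the writer's version number; "tfle" is nonzero if the
 * freelist is empty; "rta", "rtaf", and "rtf" count allocations,
 * allocation failures, and frees; "rtmbe" counts memory-barrier errors;
 * "rtbke", "rtbre", and "rtbf" count boost-kthread, boost-RT-priority,
 * and boosting errors; "rtb" counts boost tests; and "nt" counts
 * timer-handler reader invocations.
 */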
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d rtbke: %ld rtbre: %ld "
		       "rtbf: %ld rtb: %ld nt: %ld",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror,
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		rcutorture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle.  The
 * special case @rcu_idle_cpu = -1 allows the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);

	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle in turn and shut off its timer tick.  This tests
 * RCU's support for tickless idle CPUs.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		rcutorture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		rcutorture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval=%d stutter=%d irqreader=%d "
		"fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		"test_boost=%d/%d test_boost_interval=%d "
		"test_boost_duration=%d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		test_boost, cur_ops->can_boost,
		test_boost_interval, test_boost_duration);
}

static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_PRINTK_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create(rcu_torture_boost, NULL,
					  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_PRINTK_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		printk(KERN_WARNING /* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	if (stutter_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i]) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}

	/* Wait for all RCU callbacks to fire. */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	static struct rcu_torture_ops *torture_ops[] =
		{ &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
		  &rcu_bh_ops, &rcu_bh_sync_ops,
		  &srcu_ops, &srcu_expedited_ops,
		  &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
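	/* A new flavor's ops vector must be listed in the array above to
	 * be selectable via the torture_type module parameter. */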

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n",
		       torture_type);
		printk(KERN_ALERT "rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			printk(KERN_ALERT " %s", torture_ops[i]->name);
		printk(KERN_ALERT "\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero "
				  "fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					 "rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					    "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					   "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		int retval;

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	register_reboot_notifier(&rcutorture_shutdown_nb);
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);
