/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *	  Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");

MODULE_ALIAS("rcutorture");
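/* Expose module parameters as "rcutorture.<name>" whether built in or modular. */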
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutorture."

torture_param(int, fqs_duration, 0,
	      "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false,
	     "Use normal (non-expedited) GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, n_barrier_cbs, 0,
	     "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0,
	     "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10,
	     "Time to wait before starting stall (s).");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4,
	     "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7,
	     "Interval between boost tests, seconds.");
torture_param(bool, test_no_idle_hz, true,
	     "Test support for tickless idle CPUs");

char *torture_type = "rcu";
EXPORT_SYMBOL_GPL(torture_type);
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
bool verbose;
EXPORT_SYMBOL_GPL(verbose);
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;
static struct task_struct *stutter_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *shutdown_task;
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *onoff_task;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
static struct task_struct *stall_task;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture __rcu *rcu_torture_current;
static unsigned long rcu_torture_current_version;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
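/* Per-CPU reader statistics, indexed by position in the grace-period pipeline. */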
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_count) = { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1],
		      rcu_torture_batch) = { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static long n_rcu_torture_barrier_error;
static long n_rcu_torture_boost_ktrerror;
static long n_rcu_torture_boost_rterror;
static long n_rcu_torture_boost_failure;
static long n_rcu_torture_boosts;
static long n_rcu_torture_timers;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;
static long n_barrier_attempts;
static long n_barrier_successes;
static struct list_head rcu_torture_removed;
static cpumask_var_t shuffle_tmp_mask;

static int stutter_pause_test;

#if defined(MODULE) || defined(CONFIG_RCU_TORTURE_TEST_RUNNABLE)
#define RCUTORTURE_RUNNABLE_INIT 1
#else
#define RCUTORTURE_RUNNABLE_INIT 0
#endif
int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
module_param(rcutorture_runnable, int, 0444);
MODULE_PARM_DESC(rcutorture_runnable, "Start rcutorture at boot");

#if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU)
#define rcu_can_boost() 1
#else /* #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */
#define rcu_can_boost() 0
#endif /* #else #if defined(CONFIG_RCU_BOOST) && !defined(CONFIG_HOTPLUG_CPU) */

#ifdef CONFIG_RCU_TRACE
static u64 notrace rcu_trace_clock_local(void)
{
	u64 ts = trace_clock_local();
	unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
	return ts;
}
#else /* #ifdef CONFIG_RCU_TRACE */
static u64 notrace rcu_trace_clock_local(void)
{
	return 0ULL;
}
#endif /* #else #ifdef CONFIG_RCU_TRACE */

static unsigned long shutdown_time;	/* jiffies to system shutdown. */
static unsigned long boost_starttime;	/* jiffies of next boost test start. */
DEFINE_MUTEX(boost_mutex);		/* protect setting boost_starttime */
					/*  and boost task create/destroy. */
static atomic_t barrier_cbs_count;	/* Barrier callbacks registered. */
static bool barrier_phase;		/* Test phase. */
static atomic_t barrier_cbs_invoked;	/* Barrier callbacks invoked. */
static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);

/* Forward reference. */
static void rcu_torture_cleanup(void);

/*
 * Detect and respond to a system shutdown.
 */
static int
rcutorture_shutdown_notify(struct notifier_block *unused1,
			   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (fullstop == FULLSTOP_DONTSTOP)
		fullstop = FULLSTOP_SHUTDOWN;
	else
		pr_warn(/* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

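/*
 * Block while the test is stuttered or disabled, absorbing any shutdown
 * request that arrives while this kthread is waiting.
 */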
static void
rcu_stutter_wait(const char *title)
{
	while (stutter_pause_test || !rcutorture_runnable) {
		if (rcutorture_runnable)
			schedule_timeout_interruptible(1);
		else
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		torture_shutdown_absorb(title);
	}
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferred_free)(struct rcu_torture *p);
	void (*sync)(void);
	void (*exp_sync)(void);
	void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
	void (*cb_barrier)(void);
	void (*fqs)(void);
	void (*stats)(char *page);
	int irq_capable;
	int can_boost;
	const char *name;
};

static struct rcu_torture_ops *cur_ops;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct torture_random_state *rrsp)
{
	const unsigned long shortdelay_us = 200;
	const unsigned long longdelay_ms = 50;

	/* We want a short delay sometimes to make a reader delay the grace
	 * period, and we want a long delay occasionally to trigger
	 * force_quiescent_state. */

	if (!(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us)))
		udelay(shortdelay_us);
#ifdef CONFIG_PREEMPT
	if (!preempt_count() &&
	    !(torture_random(rrsp) % (nrealreaders * 20000)))
		preempt_schedule();  /* No QS if preempt_disable() in effect */
#endif
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop != FULLSTOP_DONTSTOP) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else {
		cur_ops->deferred_free(rp);
	}
}

static int rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= rcu_torture_read_unlock,
	.completed	= rcu_torture_completed,
	.deferred_free	= rcu_torture_deferred_free,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.call		= call_rcu,
	.cb_barrier	= rcu_barrier,
	.fqs		= rcu_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.can_boost	= rcu_can_boost(),
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_bh_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= rcu_bh_torture_read_unlock,
	.completed	= rcu_bh_torture_completed,
	.deferred_free	= rcu_bh_torture_deferred_free,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.call		= call_rcu_bh,
	.cb_barrier	= rcu_barrier_bh,
	.fqs		= rcu_bh_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "rcu_bh"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
	return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct torture_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
	else
		rcu_read_delay(rrsp);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

static void srcu_torture_call(struct rcu_head *head,
			      void (*func)(struct rcu_head *head))
{
	call_srcu(&srcu_ctl, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(&srcu_ctl);
}

static void srcu_torture_stats(char *page)
{
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	page += sprintf(page, "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		page += sprintf(page, " %d(%lu,%lu)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	sprintf(page, "\n");
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(&srcu_ctl);
}

static struct rcu_torture_ops srcu_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.completed	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.name		= "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

static void rcu_sched_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_sched(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops sched_ops = {
	.init		= rcu_sync_torture_init,
	.readlock	= sched_torture_read_lock,
	.read_delay	= rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock	= sched_torture_read_unlock,
	.completed	= rcu_no_completed,
	.deferred_free	= rcu_sched_torture_deferred_free,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.call		= call_rcu_sched,
	.cb_barrier	= rcu_barrier_sched,
	.fqs		= rcu_sched_force_quiescent_state,
	.stats		= NULL,
	.irq_capable	= 1,
	.name		= "sched"
};

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly registering RCU callbacks and
 * spinning waiting for them to be invoked.  If a given callback takes
 * too long to be invoked, we assume that priority inversion has occurred.
 */

struct rcu_boost_inflight {
	struct rcu_head rcu;
	int inflight;
};

static void rcu_torture_boost_cb(struct rcu_head *head)
{
	struct rcu_boost_inflight *rbip =
		container_of(head, struct rcu_boost_inflight, rcu);

	smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
	rbip->inflight = 0;
}

static int rcu_torture_boost(void *arg)
{
	unsigned long call_rcu_time;
	unsigned long endtime;
	unsigned long oldstarttime;
	struct rcu_boost_inflight rbi = { .inflight = 0 };
	struct sched_param sp;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sp.sched_priority = 1;
	if (sched_setscheduler(current, SCHED_FIFO, &sp) < 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_boost RT prio failed!");
		n_rcu_torture_boost_rterror++;
	}

	init_rcu_head_on_stack(&rbi.rcu);
	/* Each pass through the following loop does one boost-test cycle. */
	do {
		/* Wait for the next test interval. */
		oldstarttime = boost_starttime;
		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/* Do one boost-test interval. */
		endtime = oldstarttime + test_boost_duration * HZ;
		call_rcu_time = jiffies;
		while (ULONG_CMP_LT(jiffies, endtime)) {
			/* If we don't have a callback in flight, post one. */
			if (!rbi.inflight) {
				smp_mb(); /* RCU core before ->inflight = 1. */
				rbi.inflight = 1;
				call_rcu(&rbi.rcu, rcu_torture_boost_cb);
				if (jiffies - call_rcu_time >
					 test_boost_duration * HZ - HZ / 2) {
					VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
					n_rcu_torture_boost_failure++;
				}
				call_rcu_time = jiffies;
			}
			cond_resched();
			rcu_stutter_wait("rcu_torture_boost");
			if (kthread_should_stop() ||
			    fullstop != FULLSTOP_DONTSTOP)
				goto checkwait;
		}

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == boost_starttime &&
		       !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				boost_starttime = jiffies +
						  test_boost_interval * HZ;
				n_rcu_torture_boosts++;
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(1);
		}

		/* Go do the stutter. */
checkwait:	rcu_stutter_wait("rcu_torture_boost");
	} while (!kthread_should_stop() && fullstop  == FULLSTOP_DONTSTOP);

	/* Clean up and exit. */
	VERBOSE_TOROUT_STRING("rcu_torture_boost task stopping");
	torture_shutdown_absorb("rcu_torture_boost");
	while (!kthread_should_stop() || rbi.inflight)
		schedule_timeout_uninterruptible(1);
	smp_mb(); /* order accesses to ->inflight before stack-frame death. */
	destroy_rcu_head_on_stack(&rbi.rcu);
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (ULONG_CMP_LT(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(1);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		rcu_stutter_wait("rcu_torture_fqs");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_fqs task stopping");
	torture_shutdown_absorb("rcu_torture_fqs");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool exp;
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		rp = rcu_torture_alloc();
		if (rp == NULL)
			continue;
		rp->rtort_pipe_count = 0;
		udelay(torture_random(&rand) & 0x3ff);
		old_rp = rcu_dereference_check(rcu_torture_current,
					       current == writer_task);
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
		if (old_rp) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			if (gp_normal == gp_exp)
				exp = !!(torture_random(&rand) & 0x80);
			else
				exp = gp_exp;
			if (!exp) {
				cur_ops->deferred_free(old_rp);
			} else {
				cur_ops->exp_sync();
				list_add(&old_rp->rtort_free,
					 &rcu_torture_removed);
				list_for_each_entry_safe(rp, rp1,
							 &rcu_torture_removed,
							 rtort_free) {
					i = rp->rtort_pipe_count;
					if (i > RCU_TORTURE_PIPE_LEN)
						i = RCU_TORTURE_PIPE_LEN;
					atomic_inc(&rcu_torture_wcount[i]);
					if (++rp->rtort_pipe_count >=
					    RCU_TORTURE_PIPE_LEN) {
						rp->rtort_mbtest = 0;
						list_del(&rp->rtort_free);
						rcu_torture_free(rp);
					}
				 }
			}
		}
		rcutorture_record_progress(++rcu_torture_current_version);
		rcu_stutter_wait("rcu_torture_writer");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_writer task stopping");
	torture_shutdown_absorb("rcu_torture_writer");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + torture_random(&rand)%10);
		udelay(torture_random(&rand) & 0x3ff);
		if (cur_ops->cb_barrier != NULL &&
		    torture_random(&rand) % (nfakewriters * 8) == 0) {
			cur_ops->cb_barrier();
		} else if (gp_normal == gp_exp) {
			if (torture_random(&rand) & 0x80)
				cur_ops->sync();
			else
				cur_ops->exp_sync();
		} else if (gp_normal) {
			cur_ops->sync();
		} else {
			cur_ops->exp_sync();
		}
		rcu_stutter_wait("rcu_torture_fakewriter");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);

	VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task stopping");
	torture_shutdown_absorb("rcu_torture_fakewriter");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

void rcutorture_trace_dump(void)
{
	static atomic_t beenhere = ATOMIC_INIT(0);

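	/*
	 * Dump the trace buffer at most once per test run: the cheap
	 * atomic_read() turns away later callers, and the atomic_xchg()
	 * closes the race between concurrent first-time callers.
	 */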
	if (atomic_read(&beenhere))
		return;
	if (atomic_xchg(&beenhere, 1) != 0)
		return;
	ftrace_dump(DUMP_ALL);
}

/*
 * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static void rcu_torture_timer(unsigned long unused)
{
	int idx;
	int completed;
	int completed_end;
	static DEFINE_TORTURE_RANDOM(rand);
	static DEFINE_SPINLOCK(rand_lock);
	struct rcu_torture *p;
	int pipe_count;
	unsigned long long ts;

	idx = cur_ops->readlock();
	completed = cur_ops->completed();
	ts = rcu_trace_clock_local();
	p = rcu_dereference_check(rcu_torture_current,
				  rcu_read_lock_bh_held() ||
				  rcu_read_lock_sched_held() ||
				  srcu_read_lock_held(&srcu_ctl));
	if (p == NULL) {
		/* Leave because rcu_torture_writer is not yet underway */
		cur_ops->readunlock(idx);
		return;
	}
	if (p->rtort_mbtest == 0)
		atomic_inc(&n_rcu_torture_mberror);
	spin_lock(&rand_lock);
	cur_ops->read_delay(&rand);
	n_rcu_torture_timers++;
	spin_unlock(&rand_lock);
	preempt_disable();
	pipe_count = p->rtort_pipe_count;
	if (pipe_count > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		pipe_count = RCU_TORTURE_PIPE_LEN;
	}
	completed_end = cur_ops->completed();
	if (pipe_count > 1) {
		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
					  completed, completed_end);
		rcutorture_trace_dump();
	}
	__this_cpu_inc(rcu_torture_count[pipe_count]);
	completed = completed_end - completed;
	if (completed > RCU_TORTURE_PIPE_LEN) {
		/* Should not happen, but... */
		completed = RCU_TORTURE_PIPE_LEN;
	}
	__this_cpu_inc(rcu_torture_batch[completed]);
	preempt_enable();
	cur_ops->readunlock(idx);
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int completed_end;
	int idx;
	DEFINE_TORTURE_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;
	struct timer_list t;
	unsigned long long ts;

	VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);
	if (irqreader && cur_ops->irq_capable)
		setup_timer_on_stack(&t, rcu_torture_timer, 0);

	do {
		if (irqreader && cur_ops->irq_capable) {
			if (!timer_pending(&t))
				mod_timer(&t, jiffies + 1);
		}
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		ts = rcu_trace_clock_local();
		p = rcu_dereference_check(rcu_torture_current,
					  rcu_read_lock_bh_held() ||
					  rcu_read_lock_sched_held() ||
					  srcu_read_lock_held(&srcu_ctl));
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->read_delay(&rand);
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		completed_end = cur_ops->completed();
		if (pipe_count > 1) {
			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
						  ts, completed, completed_end);
			rcutorture_trace_dump();
		}
		__this_cpu_inc(rcu_torture_count[pipe_count]);
		completed = completed_end - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		__this_cpu_inc(rcu_torture_batch[completed]);
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
		rcu_stutter_wait("rcu_torture_reader");
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_reader task stopping");
	torture_shutdown_absorb("rcu_torture_reader");
	if (irqreader && cur_ops->irq_capable)
		del_timer_sync(&t);
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static void
rcu_torture_printk(char *page)
{
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	page += sprintf(page, "%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page,
		       "rtc: %p ver: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free));
	page += sprintf(page, "rtmbe: %d rtbke: %ld rtbre: %ld ",
		       atomic_read(&n_rcu_torture_mberror),
		       n_rcu_torture_boost_ktrerror,
		       n_rcu_torture_boost_rterror);
	page += sprintf(page, "rtbf: %ld rtb: %ld nt: %ld ",
		       n_rcu_torture_boost_failure,
		       n_rcu_torture_boosts,
		       n_rcu_torture_timers);
	page += sprintf(page,
		       "onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		       n_online_successes, n_online_attempts,
		       n_offline_successes, n_offline_attempts,
		       min_online, max_online,
		       min_offline, max_offline,
		       sum_online, sum_offline, HZ);
	page += sprintf(page, "barrier: %ld/%ld:%ld",
		       n_barrier_successes,
		       n_barrier_attempts,
		       n_rcu_torture_barrier_error);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	if (atomic_read(&n_rcu_torture_mberror) != 0 ||
	    n_rcu_torture_barrier_error != 0 ||
	    n_rcu_torture_boost_ktrerror != 0 ||
	    n_rcu_torture_boost_rterror != 0 ||
	    n_rcu_torture_boost_failure != 0 ||
	    i > 1) {
		page += sprintf(page, "!!! ");
		atomic_inc(&n_rcu_torture_error);
		WARN_ON_ONCE(1);
	}
	page += sprintf(page, "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", pipesummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		page += sprintf(page, " %ld", batchsummary[i]);
	page += sprintf(page, "\n%s%s ", torture_type, TORTURE_FLAG);
	page += sprintf(page, "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		page += sprintf(page, " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	page += sprintf(page, "\n");
	if (cur_ops->stats)
		cur_ops->stats(page);
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int size = nr_cpu_ids * 200 + 8192;
	char *buf;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("rcu-torture: Out of memory, need: %d", size);
		return;
	}
	rcu_torture_printk(buf);
	pr_alert("%s", buf);
	kfree(buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
		torture_shutdown_absorb("rcu_torture_stats");
	} while (!kthread_should_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	int i;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);

	set_cpus_allowed_ptr(current, shuffle_tmp_mask);

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed_ptr(reader_tasks[i],
						     shuffle_tmp_mask);
	}
	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed_ptr(fakewriter_tasks[i],
						     shuffle_tmp_mask);
	}
	if (writer_task)
		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
	if (stats_task)
		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
	if (stutter_task)
		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
	if (fqs_task)
		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
	if (shutdown_task)
		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task)
		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
	if (stall_task)
		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
	if (barrier_cbs_tasks)
		for (i = 0; i < n_barrier_cbs; i++)
			if (barrier_cbs_tasks[i])
				set_cpus_allowed_ptr(barrier_cbs_tasks[i],
						     shuffle_tmp_mask);
	if (barrier_task)
		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
		torture_shutdown_absorb("rcu_torture_shuffle");
	} while (!kthread_should_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

/* Cause the rcutorture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int
rcu_torture_stutter(void *arg)
{
	VERBOSE_TOROUT_STRING("rcu_torture_stutter task started");
	do {
		schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 1;
		if (!kthread_should_stop())
			schedule_timeout_interruptible(stutter * HZ);
		stutter_pause_test = 0;
		torture_shutdown_absorb("rcu_torture_stutter");
	} while (!kthread_should_stop());
	VERBOSE_TOROUT_STRING("rcu_torture_stutter task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s: nreaders=%d nfakewriters=%d "
		 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
		 "shuffle_interval=%d stutter=%d irqreader=%d "
		 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
		 "test_boost=%d/%d test_boost_interval=%d "
		 "test_boost_duration=%d shutdown_secs=%d "
		 "stall_cpu=%d stall_cpu_holdoff=%d "
		 "n_barrier_cbs=%d "
		 "onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, nrealreaders, nfakewriters,
		 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
		 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
		 test_boost, cur_ops->can_boost,
		 test_boost_interval, test_boost_duration, shutdown_secs,
		 stall_cpu, stall_cpu_holdoff,
		 n_barrier_cbs,
		 onoff_interval, onoff_holdoff);
}

static struct notifier_block rcutorture_shutdown_nb = {
	.notifier_call = rcutorture_shutdown_notify,
};

static void rcutorture_booster_cleanup(int cpu)
{
	struct task_struct *t;

	if (boost_tasks[cpu] == NULL)
		return;
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_boost task");
	t = boost_tasks[cpu];
	boost_tasks[cpu] = NULL;
	mutex_unlock(&boost_mutex);

	/* This must be outside of the mutex, otherwise deadlock! */
	kthread_stop(t);
	boost_tasks[cpu] = NULL;
}

static int rcutorture_booster_init(int cpu)
{
	int retval;

	if (boost_tasks[cpu] != NULL)
		return 0;  /* Already created, nothing more to do. */

	/* Don't allow time recalculation while creating a new task. */
	mutex_lock(&boost_mutex);
	VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
	boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
						  cpu_to_node(cpu),
						  "rcu_torture_boost");
	if (IS_ERR(boost_tasks[cpu])) {
		retval = PTR_ERR(boost_tasks[cpu]);
		VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
		n_rcu_torture_boost_ktrerror++;
		boost_tasks[cpu] = NULL;
		mutex_unlock(&boost_mutex);
		return retval;
	}
	kthread_bind(boost_tasks[cpu], cpu);
	wake_up_process(boost_tasks[cpu]);
	mutex_unlock(&boost_mutex);
	return 0;
}

/*
 * Cause the rcutorture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs module parameter.
 */
static int
rcu_torture_shutdown(void *arg)
{
	long delta;
	unsigned long jiffies_snap;

	VERBOSE_TOROUT_STRING("rcu_torture_shutdown task started");
	jiffies_snap = ACCESS_ONCE(jiffies);
	while (ULONG_CMP_LT(jiffies_snap, shutdown_time) &&
	       !kthread_should_stop()) {
		delta = shutdown_time - jiffies_snap;
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "rcu_torture_shutdown task: %lu jiffies remaining\n",
				 torture_type, delta);
		schedule_timeout_interruptible(delta);
		jiffies_snap = ACCESS_ONCE(jiffies);
	}
	if (kthread_should_stop()) {
		VERBOSE_TOROUT_STRING("rcu_torture_shutdown task stopping");
		return 0;
	}

	/* OK, shut down the system. */

	VERBOSE_TOROUT_STRING("rcu_torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	rcu_torture_cleanup();	/* Get the success/failure message. */
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
rcu_torture_onoff(void *arg)
{
	int cpu;
	unsigned long delta;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;
	unsigned long starttime;

	VERBOSE_TOROUT_STRING("rcu_torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_onoff end holdoff");
	}
	while (!kthread_should_stop()) {
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "rcu_torture_onoff task: offlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_offline_attempts++;
			ret = cpu_down(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: offline %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: offlined %d\n",
						 torture_type, cpu);
				n_offline_successes++;
				delta = jiffies - starttime;
				sum_offline += delta;
				if (min_offline < 0) {
					min_offline = delta;
					max_offline = delta;
				}
				if (min_offline > delta)
					min_offline = delta;
				if (max_offline < delta)
					max_offline = delta;
			}
		} else if (cpu_is_hotpluggable(cpu)) {
			if (verbose)
				pr_alert("%s" TORTURE_FLAG
					 "rcu_torture_onoff task: onlining %d\n",
					 torture_type, cpu);
			starttime = jiffies;
			n_online_attempts++;
			ret = cpu_up(cpu);
			if (ret) {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: online %d failed: errno %d\n",
						 torture_type, cpu, ret);
			} else {
				if (verbose)
					pr_alert("%s" TORTURE_FLAG
						 "rcu_torture_onoff task: onlined %d\n",
						 torture_type, cpu);
				n_online_successes++;
				delta = jiffies - starttime;
				sum_online += delta;
				if (min_online < 0) {
					min_online = delta;
					max_online = delta;
				}
				if (min_online > delta)
					min_online = delta;
				if (max_online < delta)
					max_online = delta;
			}
		}
		schedule_timeout_interruptible(onoff_interval * HZ);
	}
	VERBOSE_TOROUT_STRING("rcu_torture_onoff task stopping");
	return 0;
}

static int
rcu_torture_onoff_init(void)
{
	int ret;

	if (onoff_interval <= 0)
		return 0;
	onoff_task = kthread_run(rcu_torture_onoff, NULL, "rcu_torture_onoff");
	if (IS_ERR(onoff_task)) {
		ret = PTR_ERR(onoff_task);
		onoff_task = NULL;
		return ret;
	}
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
}

#else /* #ifdef CONFIG_HOTPLUG_CPU */

static int
rcu_torture_onoff_init(void)
{
	return 0;
}

static void rcu_torture_onoff_cleanup(void)
{
}

#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */

/*
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
static int rcu_torture_stall(void *args)
{
	unsigned long stop_at;

	VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
	if (stall_cpu_holdoff > 0) {
		VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
		schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
		VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
	}
	if (!kthread_should_stop()) {
		stop_at = get_seconds() + stall_cpu;
		/* RCU CPU stall is expected behavior in following code. */
		pr_alert("rcu_torture_stall start.\n");
		rcu_read_lock();
		preempt_disable();
		while (ULONG_CMP_LT(get_seconds(), stop_at))
			continue;  /* Induce RCU CPU stall warning. */
		preempt_enable();
		rcu_read_unlock();
		pr_alert("rcu_torture_stall end.\n");
	}
	torture_shutdown_absorb("rcu_torture_stall");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(10 * HZ);
	return 0;
}

/* Spawn CPU-stall kthread, if stall_cpu specified. */
static int __init rcu_torture_stall_init(void)
{
	int ret;

	if (stall_cpu <= 0)
		return 0;
	stall_task = kthread_run(rcu_torture_stall, NULL, "rcu_torture_stall");
	if (IS_ERR(stall_task)) {
		ret = PTR_ERR(stall_task);
		stall_task = NULL;
		return ret;
	}
	return 0;
}

/* Clean up after the CPU-stall kthread, if one was spawned. */
static void rcu_torture_stall_cleanup(void)
{
	if (stall_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping rcu_torture_stall_task.");
	kthread_stop(stall_task);
	stall_task = NULL;
}

/* Callback function for RCU barrier testing. */
void rcu_torture_barrier_cbf(struct rcu_head *rcu)
{
	atomic_inc(&barrier_cbs_invoked);
}

/* kthread function to register callbacks used to test RCU barriers. */
static int rcu_torture_barrier_cbs(void *arg)
{
	long myid = (long)arg;
	bool lastphase = 0;
	bool newphase;
	struct rcu_head rcu;

	init_rcu_head_on_stack(&rcu);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
	set_user_nice(current, 19);
	do {
		wait_event(barrier_cbs_wq[myid],
			   (newphase =
			    ACCESS_ONCE(barrier_phase)) != lastphase ||
			   kthread_should_stop() ||
			   fullstop != FULLSTOP_DONTSTOP);
		lastphase = newphase;
		smp_mb(); /* ensure barrier_phase load before ->call(). */
		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
			break;
		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task stopping");
	torture_shutdown_absorb("rcu_torture_barrier_cbs");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		smp_mb(); /* Ensure barrier_phase after prior assignments. */
		barrier_phase = !barrier_phase;
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   kthread_should_stop() ||
			   fullstop != FULLSTOP_DONTSTOP);
		if (kthread_should_stop() || fullstop != FULLSTOP_DONTSTOP)
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			WARN_ON_ONCE(1);
		}
		n_barrier_successes++;
		schedule_timeout_interruptible(HZ / 10);
	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
	VERBOSE_TOROUT_STRING("rcu_torture_barrier task stopping");
	torture_shutdown_absorb("rcu_torture_barrier");
	while (!kthread_should_stop())
		schedule_timeout_interruptible(1);
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs == 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kzalloc(n_barrier_cbs * sizeof(barrier_cbs_wq[0]),
			GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		barrier_cbs_tasks[i] = kthread_run(rcu_torture_barrier_cbs,
						   (void *)(long)i,
						   "rcu_torture_barrier_cbs");
		if (IS_ERR(barrier_cbs_tasks[i])) {
			ret = PTR_ERR(barrier_cbs_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier_cbs");
			barrier_cbs_tasks[i] = NULL;
			return ret;
		}
	}
	barrier_task = kthread_run(rcu_torture_barrier, NULL,
				   "rcu_torture_barrier");
	if (IS_ERR(barrier_task)) {
		ret = PTR_ERR(barrier_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create rcu_torture_barrier");
		barrier_task = NULL;
	}
	return 0;
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	if (barrier_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier task");
		kthread_stop(barrier_task);
		barrier_task = NULL;
	}
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++) {
			if (barrier_cbs_tasks[i] != NULL) {
				VERBOSE_TOROUT_STRING("Stopping rcu_torture_barrier_cbs task");
				kthread_stop(barrier_cbs_tasks[i]);
				barrier_cbs_tasks[i] = NULL;
			}
		}
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

static int rcutorture_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		(void)rcutorture_booster_init(cpu);
		break;
	case CPU_DOWN_PREPARE:
		rcutorture_booster_cleanup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcutorture_cpu_nb = {
	.notifier_call = rcutorture_cpu_notify,
};

static void
rcu_torture_cleanup(void)
{
	int i;

	mutex_lock(&fullstop_mutex);
	rcutorture_record_test_transition();
	if (fullstop == FULLSTOP_SHUTDOWN) {
		pr_warn(/* but going down anyway, so... */
		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		if (cur_ops->cb_barrier != NULL)
			cur_ops->cb_barrier();
		return;
	}
	fullstop = FULLSTOP_RMMOD;
	mutex_unlock(&fullstop_mutex);
	unregister_reboot_notifier(&rcutorture_shutdown_nb);
	rcu_torture_barrier_cleanup();
	rcu_torture_stall_cleanup();
	if (stutter_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_stutter task");
		kthread_stop(stutter_task);
	}
	stutter_task = NULL;
	if (shuffler_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
		free_cpumask_var(shuffle_tmp_mask);
	}
	shuffler_task = NULL;

	if (writer_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i]) {
				VERBOSE_TOROUT_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks) {
1712
		for (i = 0; i < nfakewriters; i++) {
1713
			if (fakewriter_tasks[i]) {
1714
				VERBOSE_TOROUT_STRING(
1715 1716 1717 1718 1719 1720 1721 1722 1723
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	if (fqs_task) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_fqs task");
		kthread_stop(fqs_task);
	}
	fqs_task = NULL;
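	/* Tear down RCU priority boosting, if it was in use. */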
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {
		unregister_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i)
			rcutorture_booster_cleanup(i);
	}
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping rcu_torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
	rcu_torture_onoff_cleanup();

	/* Wait for all RCU callbacks to fire.  */

	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (n_online_successes != n_online_attempts ||
		 n_offline_successes != n_offline_attempts)
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
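/* Intentionally empty callback for the duplicate-call_rcu() test below. */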
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period.  Unlikely, but can happen.  If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("rcutorture: duplicated callback was invoked.\n");
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.  Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu(&rh2, rcu_torture_leak_cb);
	call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

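/*
 * Process the module parameters, initialize the test structures, and
 * spawn all of the rcutorture kthreads.
 */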
static int __init
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;
	int retval;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
	};

	mutex_lock(&fullstop_mutex);

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		mutex_unlock(&fullstop_mutex);
		return -EINVAL;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (cur_ops->init)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	fullstop = FULLSTOP_DONTSTOP;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_rterror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_TOROUT_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_create(rcu_torture_writer, NULL,
				     "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_TOROUT_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	wake_up_process(writer_task);
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
				   GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
						  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_TOROUT_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_TOROUT_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					"rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;

		if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
			firsterr = -ENOMEM;
			VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
			goto unwind;
		}

		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					  "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			free_cpumask_var(shuffle_tmp_mask);
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
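	/* The stutter kthread periodically pauses and resumes the other torture kthreads. */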
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		/* Create the stutter thread */
		stutter_task = kthread_run(rcu_torture_stutter, NULL,
					  "rcu_torture_stutter");
		if (IS_ERR(stutter_task)) {
			firsterr = PTR_ERR(stutter_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create stutter");
			stutter_task = NULL;
			goto unwind;
		}
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_duration) {
		/* Create the fqs thread */
		fqs_task = kthread_run(rcu_torture_fqs, NULL,
				       "rcu_torture_fqs");
		if (IS_ERR(fqs_task)) {
			firsterr = PTR_ERR(fqs_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create fqs");
			fqs_task = NULL;
			goto unwind;
		}
	}
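	/* Enforce sane boost parameters and set up RCU priority boosting if requested. */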
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if ((test_boost == 1 && cur_ops->can_boost) ||
	    test_boost == 2) {

		boost_starttime = jiffies + test_boost_interval * HZ;
		register_cpu_notifier(&rcutorture_cpu_nb);
		for_each_possible_cpu(i) {
			if (cpu_is_offline(i))
				continue;  /* Heuristic: CPU can go offline. */
			retval = rcutorture_booster_init(i);
			if (retval < 0) {
				firsterr = retval;
				goto unwind;
			}
		}
	}
	if (shutdown_secs > 0) {
		shutdown_time = jiffies + shutdown_secs * HZ;
		shutdown_task = kthread_create(rcu_torture_shutdown, NULL,
					       "rcu_torture_shutdown");
		if (IS_ERR(shutdown_task)) {
			firsterr = PTR_ERR(shutdown_task);
			VERBOSE_TOROUT_ERRSTRING("Failed to create shutdown");
			shutdown_task = NULL;
			goto unwind;
		}
		wake_up_process(shutdown_task);
	}
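	/* Set up CPU-hotplug, reboot-notifier, stall-warning, and rcu_barrier() testing. */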
	i = rcu_torture_onoff_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	register_reboot_notifier(&rcutorture_shutdown_nb);
	i = rcu_torture_stall_init();
	if (i != 0) {
		firsterr = i;
		goto unwind;
	}
	retval = rcu_torture_barrier_init();
	if (retval != 0) {
		firsterr = retval;
		goto unwind;
	}
	if (object_debug)
		rcu_test_debug_objects();
	rcutorture_record_test_transition();
	mutex_unlock(&fullstop_mutex);
	return 0;

unwind:
	mutex_unlock(&fullstop_mutex);
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);