/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;	/* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;	/* # fake writer threads */
static int stat_interval;	/* Interval between stats, in seconds. */
				/*  Defaults to "only at end of test". */
static int verbose;		/* Print more debug info. */
static int test_no_idle_hz;	/* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_sync, rcu_bh, rcu_bh_sync, srcu, sched)");
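
/*
 * Example invocation (parameter values are illustrative only):
 *
 *	modprobe rcutorture torture_type=srcu nreaders=4 stat_interval=30
 *
 * See Documentation/RCU/torture.txt for details.
 */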

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
	do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
	do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
	struct rcu_head rtort_rcu;
	int rtort_pipe_count;
	struct list_head rtort_free;
	int rtort_mbtest;
};

static int fullstop = 0;	/* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
	{ 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
	{ 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
	struct list_head *p;

	spin_lock_bh(&rcu_torture_lock);
	if (list_empty(&rcu_torture_freelist)) {
		atomic_inc(&n_rcu_torture_alloc_fail);
		spin_unlock_bh(&rcu_torture_lock);
		return NULL;
	}
	atomic_inc(&n_rcu_torture_alloc);
	p = rcu_torture_freelist.next;
	list_del_init(p);
	spin_unlock_bh(&rcu_torture_lock);
	return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
	atomic_inc(&n_rcu_torture_free);
	spin_lock_bh(&rcu_torture_lock);
	list_add_tail(&p->rtort_free, &rcu_torture_freelist);
	spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
	unsigned long rrs_state;
	long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD	479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
	long refresh;

	if (--rrsp->rrs_count < 0) {
		get_random_bytes(&refresh, sizeof(refresh));
		rrsp->rrs_state += refresh;
		rrsp->rrs_count = RCU_RANDOM_REFRESH;
	}
	rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
	return swahw32(rrsp->rrs_state);
}
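
/*
 * That is, rrs_state follows x = x * RCU_RANDOM_MULT + RCU_RANDOM_ADD
 * (mod 2^BITS_PER_LONG), and swahw32() swaps 16-bit halfwords so that
 * the stronger high-order bits of the LCG land in the low-order
 * positions of the returned value.
 */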

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readdelay)(struct rcu_random_state *rrsp);
	void (*readunlock)(int idx);
	int (*completed)(void);
	void (*deferredfree)(struct rcu_torture *p);
	void (*sync)(void);
	int (*stats)(char *page);
	char *name;
};
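
/*
 * Set by rcu_torture_init() to the torture_ops[] entry whose name
 * matches the torture_type module parameter.
 */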
static struct rcu_torture_ops *cur_ops = NULL;

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long longdelay = 200;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
	if (!delay)
		udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
	return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	int i;
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (fullstop) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		rcu_torture_free(rp);
	} else
		cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu"
};

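/*
 * Deferred-free for the synchronous flavors: wait for a grace period,
 * then sweep the list of removed elements, freeing any whose pipeline
 * counts have run to completion.
 */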
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	cur_ops->sync();
	list_add(&p->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		i = rp->rtort_pipe_count;
		if (i > RCU_TORTURE_PIPE_LEN)
			i = RCU_TORTURE_PIPE_LEN;
		atomic_inc(&rcu_torture_wcount[i]);
		if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
			rp->rtort_mbtest = 0;
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_torture_read_lock,
	.readdelay = rcu_read_delay,
	.readunlock = rcu_torture_read_unlock,
	.completed = rcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = synchronize_rcu,
	.stats = NULL,
	.name = "rcu_sync"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
	return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

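/*
 * Emulate a synchronous rcu_bh grace period: post a callback that
 * signals a completion, then block until the callback runs.
 */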
struct rcu_bh_torture_synchronize {
	struct rcu_head head;
	struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
	struct rcu_bh_torture_synchronize *rcu;

	rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
	complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
	struct rcu_bh_torture_synchronize rcu;

	init_completion(&rcu.completion);
	call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
	wait_for_completion(&rcu.completion);
}

static struct rcu_torture_ops rcu_bh_ops = {
	.init = NULL,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_bh_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = rcu_bh_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = rcu_bh_torture_read_unlock,
	.completed = rcu_bh_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = rcu_bh_torture_synchronize,
	.stats = NULL,
	.name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
	init_srcu_struct(&srcu_ctl);
	rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
	synchronize_srcu(&srcu_ctl);
	cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void)
{
	return srcu_read_lock(&srcu_ctl);
}
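
/*
 * Unlike rcu_read_delay(), this delay may sleep: SRCU read-side
 * critical sections are allowed to block.
 */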

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
	if (!delay)
		schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx)
{
	srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
	return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(&srcu_ctl);
}

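/*
 * Dump the per-CPU reader counts for both SRCU counter indexes; the
 * low bit of ->completed selects the index that new readers use.
 */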
static int srcu_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;
	int idx = srcu_ctl.completed & 0x1;

	cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
		       torture_type, TORTURE_FLAG, idx);
	for_each_possible_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
			       per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}

static struct rcu_torture_ops srcu_ops = {
	.init = srcu_torture_init,
	.cleanup = srcu_torture_cleanup,
	.readlock = srcu_torture_read_lock,
	.readdelay = srcu_read_delay,
	.readunlock = srcu_torture_read_unlock,
	.completed = srcu_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = srcu_torture_synchronize,
	.stats = srcu_torture_stats,
	.name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

static int sched_torture_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_torture_read_unlock(int idx)
{
	preempt_enable();
}

/* There is no grace-period counter for this flavor, so just report zero. */
static int sched_torture_completed(void)
{
	return 0;
}

static void sched_torture_synchronize(void)
{
	synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
	.init = rcu_sync_torture_init,
	.cleanup = NULL,
	.readlock = sched_torture_read_lock,
	.readdelay = rcu_read_delay,  /* just reuse rcu's version. */
	.readunlock = sched_torture_read_unlock,
	.completed = sched_torture_completed,
	.deferredfree = rcu_sync_torture_deferred_free,
	.sync = sched_torture_synchronize,
	.stats = NULL,
	.name = "sched"
};

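/*
 * NULL-terminated list of flavors; rcu_torture_init() matches the
 * torture_type parameter against each entry's ->name.
 */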
static struct rcu_torture_ops *torture_ops[] =
	{ &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops, &srcu_ops,
	  &sched_ops, NULL };

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	int i;
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1);
		if ((rp = rcu_torture_alloc()) == NULL)
			continue;
		rp->rtort_pipe_count = 0;
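		/* Wait a random interval (up to ~1ms) before publishing. */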
		udelay(rcu_random(&rand) & 0x3ff);
		old_rp = rcu_torture_current;
		rp->rtort_mbtest = 1;
		rcu_assign_pointer(rcu_torture_current, rp);
		smp_wmb();
		if (old_rp != NULL) {
			i = old_rp->rtort_pipe_count;
			if (i > RCU_TORTURE_PIPE_LEN)
				i = RCU_TORTURE_PIPE_LEN;
			atomic_inc(&rcu_torture_wcount[i]);
			old_rp->rtort_pipe_count++;
			cur_ops->deferredfree(old_rp);
		}
		rcu_torture_current_version++;
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
	DEFINE_RCU_RANDOM(rand);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
	set_user_nice(current, 19);

	do {
		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
		udelay(rcu_random(&rand) & 0x3ff);
		cur_ops->sync();
	} while (!kthread_should_stop() && !fullstop);

	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
	int completed;
	int idx;
	DEFINE_RCU_RANDOM(rand);
	struct rcu_torture *p;
	int pipe_count;

	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
	set_user_nice(current, 19);

	do {
		idx = cur_ops->readlock();
		completed = cur_ops->completed();
		p = rcu_dereference(rcu_torture_current);
		if (p == NULL) {
			/* Wait for rcu_torture_writer to get underway */
			cur_ops->readunlock(idx);
			schedule_timeout_interruptible(HZ);
			continue;
		}
		if (p->rtort_mbtest == 0)
			atomic_inc(&n_rcu_torture_mberror);
		cur_ops->readdelay(&rand);
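		/* Disable preemption so the per-CPU counters stay on one CPU. */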
		preempt_disable();
		pipe_count = p->rtort_pipe_count;
		if (pipe_count > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			pipe_count = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_count)[pipe_count];
		completed = cur_ops->completed() - completed;
		if (completed > RCU_TORTURE_PIPE_LEN) {
			/* Should not happen, but... */
			completed = RCU_TORTURE_PIPE_LEN;
		}
		++__get_cpu_var(rcu_torture_batch)[completed];
		preempt_enable();
		cur_ops->readunlock(idx);
		schedule();
	} while (!kthread_should_stop() && !fullstop);
	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(1);
	return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
static int
rcu_torture_printk(char *page)
{
	int cnt = 0;
	int cpu;
	int i;
	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
		}
	}
	for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
		if (pipesummary[i] != 0)
			break;
	}
	cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt],
		       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
		       "rtmbe: %d",
		       rcu_torture_current,
		       rcu_torture_current_version,
		       list_empty(&rcu_torture_freelist),
		       atomic_read(&n_rcu_torture_alloc),
		       atomic_read(&n_rcu_torture_alloc_fail),
		       atomic_read(&n_rcu_torture_free),
		       atomic_read(&n_rcu_torture_mberror));
	if (atomic_read(&n_rcu_torture_mberror) != 0)
		cnt += sprintf(&page[cnt], " !!!");
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
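	/* Entries past pipeline stage 1 mean a reader saw stale data. */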
	if (i > 1) {
		cnt += sprintf(&page[cnt], "!!! ");
		atomic_inc(&n_rcu_torture_error);
	}
	cnt += sprintf(&page[cnt], "Reader Pipe: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Reader Batch: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
	cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
	cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
		cnt += sprintf(&page[cnt], " %d",
			       atomic_read(&rcu_torture_wcount[i]));
	}
	cnt += sprintf(&page[cnt], "\n");
	if (cur_ops->stats != NULL)
		cnt += cur_ops->stats(&page[cnt]);
	return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
	int cnt;

	cnt = rcu_torture_printk(printk_buf);
	printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		rcu_torture_stats_print();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
	return 0;
}

static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
	cpumask_t tmp_mask = CPU_MASK_ALL;
	int i;

	lock_cpu_hotplug();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		unlock_cpu_hotplug();
		return;
	}

	if (rcu_idle_cpu != -1)
		cpu_clear(rcu_idle_cpu, tmp_mask);

	set_cpus_allowed(current, tmp_mask);

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++)
			if (reader_tasks[i])
				set_cpus_allowed(reader_tasks[i], tmp_mask);
	}

	if (fakewriter_tasks != NULL) {
		for (i = 0; i < nfakewriters; i++)
			if (fakewriter_tasks[i])
				set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
	}

	if (writer_task)
		set_cpus_allowed(writer_task, tmp_mask);

	if (stats_task)
		set_cpus_allowed(stats_task, tmp_mask);

	if (rcu_idle_cpu == -1)
		rcu_idle_cpu = num_online_cpus() - 1;
	else
		rcu_idle_cpu--;

	unlock_cpu_hotplug();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval * HZ);
		rcu_torture_shuffle_tasks();
	} while (!kthread_should_stop());
	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
	return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
	printk(KERN_ALERT "%s" TORTURE_FLAG
		"--- %s: nreaders=%d nfakewriters=%d "
		"stat_interval=%d verbose=%d test_no_idle_hz=%d "
		"shuffle_interval = %d\n",
		torture_type, tag, nrealreaders, nfakewriters,
		stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}

static void
rcu_torture_cleanup(void)
{
	int i;

	fullstop = 1;
	if (shuffler_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
		kthread_stop(shuffler_task);
	}
	shuffler_task = NULL;

	if (writer_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
		kthread_stop(writer_task);
	}
	writer_task = NULL;

	if (reader_tasks != NULL) {
		for (i = 0; i < nrealreaders; i++) {
			if (reader_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_reader task");
				kthread_stop(reader_tasks[i]);
			}
			reader_tasks[i] = NULL;
		}
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	rcu_torture_current = NULL;

	if (fakewriter_tasks != NULL) {
		for (i = 0; i < nfakewriters; i++) {
			if (fakewriter_tasks[i] != NULL) {
				VERBOSE_PRINTK_STRING(
					"Stopping rcu_torture_fakewriter task");
				kthread_stop(fakewriter_tasks[i]);
			}
			fakewriter_tasks[i] = NULL;
		}
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (stats_task != NULL) {
		VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
		kthread_stop(stats_task);
	}
	stats_task = NULL;

	/* Wait for all RCU callbacks to fire.  */
	rcu_barrier();

	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();
	if (atomic_read(&n_rcu_torture_error))
		rcu_torture_print_module_parms("End of test: FAILURE");
	else
		rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int
rcu_torture_init(void)
{
	int i;
	int cpu;
	int firsterr = 0;

	/* Process args and tell the world that the torturer is on the job. */

	for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (cur_ops == NULL) {
		printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
		       torture_type);
		return -EINVAL;
	}
	if (cur_ops->init != NULL)
		cur_ops->init(); /* no "goto unwind" prior to this point!!! */

	if (nreaders >= 0)
		nrealreaders = nreaders;
	else
		nrealreaders = 2 * num_online_cpus();
	rcu_torture_print_module_parms("Start of test");
	fullstop = 0;

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_error, 0);
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}

	/* Start up the kthreads. */

	VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
	writer_task = kthread_run(rcu_torture_writer, NULL,
				  "rcu_torture_writer");
	if (IS_ERR(writer_task)) {
		firsterr = PTR_ERR(writer_task);
		VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
		writer_task = NULL;
		goto unwind;
	}
	fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
	                           GFP_KERNEL);
	if (fakewriter_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nfakewriters; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
		fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
		                                  "rcu_torture_fakewriter");
		if (IS_ERR(fakewriter_tasks[i])) {
			firsterr = PTR_ERR(fakewriter_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
			fakewriter_tasks[i] = NULL;
			goto unwind;
		}
	}
	reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PRINTK_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
		reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
					      "rcu_torture_reader");
		if (IS_ERR(reader_tasks[i])) {
			firsterr = PTR_ERR(reader_tasks[i]);
			VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
			reader_tasks[i] = NULL;
			goto unwind;
		}
	}
	if (stat_interval > 0) {
		VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
		stats_task = kthread_run(rcu_torture_stats, NULL,
					"rcu_torture_stats");
		if (IS_ERR(stats_task)) {
			firsterr = PTR_ERR(stats_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
			stats_task = NULL;
			goto unwind;
		}
	}
	if (test_no_idle_hz) {
		rcu_idle_cpu = num_online_cpus() - 1;
		/* Create the shuffler thread */
		shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
					  "rcu_torture_shuffle");
		if (IS_ERR(shuffler_task)) {
			firsterr = PTR_ERR(shuffler_task);
			VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
			shuffler_task = NULL;
			goto unwind;
		}
	}
	return 0;

unwind:
	rcu_torture_cleanup();
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);