/* Include in trace.c */

#include <linux/stringify.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

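/*
 * Consume every event on one CPU's ring buffer and check that each
 * entry is of a type the selftests can produce. On corruption, disable
 * tracing and return -1.
 */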
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer holds at most trace_buf_size entries;
		 * if we loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	arch_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * trace_test_buffer_cpu() runs a while loop to consume all data.
	 * If the calling tracer is broken and keeps filling the buffer,
	 * that loop would run forever and hard-lock the box. We disable
	 * the ring buffer while we run this test to prevent that.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	arch_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
		trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

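/*
 * Note: DYN_FTRACE_TEST_NAME() and DYN_FTRACE_TEST_NAME2() are defined
 * out of line (in trace_selftest_dynamic.c in mainline kernels) so that
 * they remain real, traceable functions for the filters below to match.
 */
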
static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe1_cnt++;
}

static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe2_cnt++;
}

static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_probe3_cnt++;
}

static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip,
					    unsigned long pip,
					    struct ftrace_ops *op,
					    struct pt_regs *pt_regs)
{
	trace_selftest_test_global_cnt++;
}

static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip,
					 unsigned long pip,
					 struct ftrace_ops *op,
					 struct pt_regs *pt_regs)
{
	trace_selftest_test_dyn_cnt++;
}

static struct ftrace_ops test_probe1 = {
	.func			= trace_selftest_test_probe1_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe2 = {
	.func			= trace_selftest_test_probe2_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_probe3 = {
	.func			= trace_selftest_test_probe3_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

static struct ftrace_ops test_global = {
	.func		= trace_selftest_test_global_func,
	.flags		= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
};

static void print_counts(void)
{
	printk("(%d %d %d %d %d) ",
	       trace_selftest_test_probe1_cnt,
	       trace_selftest_test_probe2_cnt,
	       trace_selftest_test_probe3_cnt,
	       trace_selftest_test_global_cnt,
	       trace_selftest_test_dyn_cnt);
}

static void reset_counts(void)
{
	trace_selftest_test_probe1_cnt = 0;
	trace_selftest_test_probe2_cnt = 0;
	trace_selftest_test_probe3_cnt = 0;
	trace_selftest_test_global_cnt = 0;
	trace_selftest_test_dyn_cnt = 0;
}

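/*
 * Exercise several ftrace_ops at once: probe1 filters on function 1,
 * probe2 on function 2, probe3 on both, and test_global traces all
 * functions. After each call to the two test functions every counter
 * must match exactly; the test is then repeated with a dynamically
 * allocated ops registered as well.
 */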
static int trace_selftest_ops(int cnt)
{
	int save_ftrace_enabled = ftrace_enabled;
	struct ftrace_ops *dyn_ops;
	char *func1_name;
	char *func2_name;
	int len1;
	int len2;
	int ret = -1;

	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace ops #%d: ", cnt);

	ftrace_enabled = 1;
	reset_counts();

	/* Handle PPC64 '.' name */
	func1_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	func2_name = "*" __stringify(DYN_FTRACE_TEST_NAME2);
	len1 = strlen(func1_name);
	len2 = strlen(func2_name);

	/*
	 * Probe 1 will trace function 1.
	 * Probe 2 will trace function 2.
	 * Probe 3 will trace functions 1 and 2.
	 */
	ftrace_set_filter(&test_probe1, func1_name, len1, 1);
	ftrace_set_filter(&test_probe2, func2_name, len2, 1);
	ftrace_set_filter(&test_probe3, func1_name, len1, 1);
	ftrace_set_filter(&test_probe3, func2_name, len2, 0);

	register_ftrace_function(&test_probe1);
	register_ftrace_function(&test_probe2);
	register_ftrace_function(&test_probe3);
	register_ftrace_function(&test_global);

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 0)
		goto out;
	if (trace_selftest_test_probe3_cnt != 1)
		goto out;
	if (trace_selftest_test_global_cnt == 0)
		goto out;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 1)
		goto out;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out;
	if (trace_selftest_test_probe3_cnt != 2)
		goto out;

	/* Add a dynamic probe */
	dyn_ops = kzalloc(sizeof(*dyn_ops), GFP_KERNEL);
	if (!dyn_ops) {
		printk("MEMORY ERROR ");
		goto out;
	}

	dyn_ops->func = trace_selftest_test_dyn_func;

	register_ftrace_function(dyn_ops);

	trace_selftest_test_global_cnt = 0;

	DYN_FTRACE_TEST_NAME();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 1)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 3)
		goto out_free;
	if (trace_selftest_test_global_cnt == 0)
		goto out;
	if (trace_selftest_test_dyn_cnt == 0)
		goto out_free;

	DYN_FTRACE_TEST_NAME2();

	print_counts();

	if (trace_selftest_test_probe1_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe2_cnt != 2)
		goto out_free;
	if (trace_selftest_test_probe3_cnt != 4)
		goto out_free;

	ret = 0;
 out_free:
	unregister_ftrace_function(dyn_ops);
	kfree(dyn_ops);

 out:
	/* Purposely unregister in the same order */
	unregister_ftrace_function(&test_probe1);
	unregister_ftrace_function(&test_probe2);
	unregister_ftrace_function(&test_probe3);
	unregister_ftrace_function(&test_global);

	/* Make sure everything is off */
	reset_counts();
	DYN_FTRACE_TEST_NAME();
	DYN_FTRACE_TEST_NAME();

	if (trace_selftest_test_probe1_cnt ||
	    trace_selftest_test_probe2_cnt ||
	    trace_selftest_test_probe3_cnt ||
	    trace_selftest_test_global_cnt ||
	    trace_selftest_test_dyn_cnt)
		ret = -1;

	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* passed in by parameter to keep gcc from optimizing it away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* filter only on our function */
	ftrace_set_global_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		trace->reset(tr);
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

	/* Test the ops with global tracing running */
	ret = trace_selftest_ops(1);
	trace->reset(tr);

 out:
	ftrace_enabled = save_ftrace_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_global_filter(NULL, 0, 1);

	/* Test the ops with global tracing off */
	if (!ret)
		ret = trace_selftest_ops(2);

	return ret;
}

static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	if (trace_selftest_recursion_cnt++ > 10)
		return;
	DYN_FTRACE_TEST_NAME();
}

static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion protection. By
	 * calling this function again, we should recurse back into this
	 * function and count again. But this only happens if the arch
	 * supports all ftrace features and nothing else is using the
	 * function tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;
	DYN_FTRACE_TEST_NAME();
}

static struct ftrace_ops test_rec_probe = {
	.func			= trace_selftest_test_recursion_func,
};

static struct ftrace_ops test_recsafe_probe = {
	.func			= trace_selftest_test_recursion_safe_func,
	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
};

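/*
 * Register the non-recursion-safe probe and expect exactly one call
 * (ftrace's own protection must stop the recursion), then register the
 * recursion-safe probe and expect exactly two calls (its own check
 * allows a single level of recursion).
 */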
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");


	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	trace_selftest_recursion_cnt = 1;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 2) {
		pr_cont("*callback not called expected 2 times (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */

static enum {
	TRACE_SELFTEST_REGS_START,
	TRACE_SELFTEST_REGS_FOUND,
	TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;

static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	if (pt_regs)
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
	else
		trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}

static struct ftrace_ops test_regs_probe = {
	.func		= trace_selftest_test_regs_func,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};

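/*
 * Verify the FTRACE_OPS_FL_SAVE_REGS contract: with arch support the
 * callback must receive a non-NULL pt_regs; without it, registration
 * must fail unless FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED is also set,
 * in which case the callback must see NULL regs.
 */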
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}


	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;

	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	ftrace_enabled = save_ftrace_enabled;

	return ret;
}

/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);
	if (ret)
		goto out;

	ret = trace_selftest_function_recursion();
	if (ret)
		goto out;

	ret = trace_selftest_function_regs();
 out:
	ftrace_enabled = save_ftrace_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* This is harmlessly racy, we want to approximately detect a hang */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		if (ftrace_dump_on_oops)
			__ftrace_dump(false, DUMP_ALL);
		return 0;
	}

	return trace_graph_entry(trace);
}

/*
 * Pretty much the same as the function tracer selftest, from which
 * this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

	/*
	 * Simulate the init() callback, but attach a watchdog callback
	 * to detect and recover from possible hangs.
	 */
	tracing_reset_online_cpus(tr);
	set_graph_array(tr);
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
	tracing_start_cmdline_record();

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
		tracing_selftest_disabled = true;
		ret = -1;
		goto out;
	}

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
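/*
 * Disable interrupts for ~100us with the irqsoff tracer active, then
 * check that the resulting latency was captured in the max buffer.
 */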
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, making
	 * flipping impossible if a parallel max irqs-off latency
	 * comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, this test will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, making
	 * flipping impossible if a parallel max preempt-off latency
	 * comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, this test will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning caused by a failed buffer
	 * flip: tracing_stop() disables the tr and max buffers, making
	 * flipping impossible if a parallel max irqs/preempt-off
	 * latency comes in.
	 */
	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread; its priority doesn't need to be too high */
	static const struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we are running at the new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	complete(x);

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

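/*
 * Wakeup-latency selftest: park an RT kthread, wake it while tracing,
 * and verify the wakeup tracer captured the latency in the max buffer.
 */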
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	while (p->on_rq) {
		/*
		 * Sleep to make sure the RT thread is asleep too.
		 * On virtual machines we can't rely on timings,
		 * but we want to make sure this test still works.
		 */
		msleep(100);
	}

	init_completion(&isrt);

	wake_up_process(p);

	/* Wait for the task to wake up */
	wait_for_completion(&isrt);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	printk("ret = %d\n", ret);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */