trace_selftest.c 18.3 KB
Newer Older
S
Steven Rostedt 已提交
1 2
/* Include in trace.c */

3
#include <linux/stringify.h>
S
Steven Rostedt 已提交
4
#include <linux/kthread.h>
I
Ingo Molnar 已提交
5
#include <linux/delay.h>
S
Steven Rostedt 已提交
6

I
Ingo Molnar 已提交
7
static inline int trace_valid_entry(struct trace_entry *entry)
S
Steven Rostedt 已提交
8 9 10 11
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
12
	case TRACE_WAKE:
13
	case TRACE_STACK:
14
	case TRACE_PRINT:
15
	case TRACE_SPECIAL:
S
Steven Rostedt 已提交
16
	case TRACE_BRANCH:
17 18
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
19
	case TRACE_HW_BRANCHES:
20
	case TRACE_KSYM:
S
Steven Rostedt 已提交
21 22 23 24 25
		return 1;
	}
	return 0;
}

26
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
S
Steven Rostedt 已提交
27
{
28 29
	struct ring_buffer_event *event;
	struct trace_entry *entry;
30
	unsigned int loops = 0;
S
Steven Rostedt 已提交
31

32 33
	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);
S
Steven Rostedt 已提交
34

35 36 37 38 39 40 41 42 43
		/*
		 * The ring buffer is a size of trace_buf_size, if
		 * we loop more than the size, there's something wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
44
		if (!trace_valid_entry(entry)) {
I
Ingo Molnar 已提交
45
			printk(KERN_CONT ".. invalid entry %d ",
46
				entry->type);
S
Steven Rostedt 已提交
47 48 49 50 51 52
			goto failed;
		}
	}
	return 0;

 failed:
53 54
	/* disable tracing */
	tracing_disabled = 1;
S
Steven Rostedt 已提交
55 56 57 58 59 60 61 62 63 64
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
65 66
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;
S
Steven Rostedt 已提交
67

68
	/* Don't allow flipping of max traces now */
69
	local_irq_save(flags);
70
	arch_spin_lock(&ftrace_max_lock);
S
Steven Rostedt 已提交
71

72
	cnt = ring_buffer_entries(tr->buffer);
S
Steven Rostedt 已提交
73

74 75 76 77 78 79 80 81
	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
82 83
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
S
Steven Rostedt 已提交
84 85 86
		if (ret)
			break;
	}
87
	tracing_on();
88
	arch_spin_unlock(&ftrace_max_lock);
89
	local_irq_restore(flags);
S
Steven Rostedt 已提交
90 91 92 93 94 95 96

	if (count)
		*count = cnt;

	return ret;
}

97 98 99 100 101
/* Log that a tracer's init() callback failed during its selftest. */
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
102
#ifdef CONFIG_FUNCTION_TRACER
103 104 105 106 107 108 109 110 111 112

#ifdef CONFIG_DYNAMIC_FTRACE

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int saved_ftrace_enabled = ftrace_enabled;
	int saved_tracer_enabled = tracer_enabled;
	unsigned long entries;
	char *filter;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* Turn tracing on before recording the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* Passed in by parameter so gcc cannot optimize the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* prepend characters to the
	 * function names, so put a leading '*' glob to accommodate them.
	 */
	filter = "*" __stringify(DYN_FTRACE_TEST_NAME);

	/* Trace nothing but our test function */
	ftrace_set_filter(filter, strlen(filter), 1);

	/* Bring up the tracer under test */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for a 1/10 of a second */
	msleep(100);

	/* The filter excludes everything, so nothing should be recorded */
	ret = trace_test_buffer(tr, &entries);
	if (ret)
		goto out;

	if (entries) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* Hit the filtered function once */
	func();

	/* And give it time to be recorded */
	msleep(100);

	/* Shut the tracing down */
	tracing_stop();
	ftrace_enabled = 0;

	/* Verify what was recorded */
	ret = trace_test_buffer(tr, &entries);
	trace->reset(tr);
	tracing_start();

	/* Exactly one hit of the filtered function is expected */
	if (!ret && entries != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", entries);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = saved_ftrace_enabled;
	tracer_enabled = saved_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
I
Ingo Molnar 已提交
193

S
Steven Rostedt 已提交
194 195 196 197 198 199 200 201
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
202 203
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
204 205
	unsigned long count;
	int ret;
S
Steven Rostedt 已提交
206

207 208 209
	/* make sure msleep has been recorded */
	msleep(1);

S
Steven Rostedt 已提交
210
	/* start the tracing */
I
Ingo Molnar 已提交
211
	ftrace_enabled = 1;
212
	tracer_enabled = 1;
I
Ingo Molnar 已提交
213

214
	ret = tracer_init(trace, tr);
215 216 217 218 219
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

S
Steven Rostedt 已提交
220 221 222
	/* Sleep for a 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
S
Steven Rostedt 已提交
223
	tracing_stop();
I
Ingo Molnar 已提交
224 225
	ftrace_enabled = 0;

S
Steven Rostedt 已提交
226 227 228
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
S
Steven Rostedt 已提交
229
	tracing_start();
S
Steven Rostedt 已提交
230 231 232 233

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
234
		goto out;
S
Steven Rostedt 已提交
235 236
	}

237 238 239 240 241 242 243
	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

244 245 246 247
	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

S
Steven Rostedt 已提交
248 249
	return ret;
}
250
#endif /* CONFIG_FUNCTION_TRACER */
S
Steven Rostedt 已提交
251

252 253

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Maximum number of functions to trace before diagnosing a hang */
#define GRAPH_MAX_FUNC_TEST	100000000

static void __ftrace_dump(bool disable_tracing);
static unsigned int graph_hang_thresh;

/* Wrap the real function entry probe to avoid possible hanging */
static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* Harmlessly racy by design: an approximate count suffices here */
	if (likely(++graph_hang_thresh <= GRAPH_MAX_FUNC_TEST))
		return trace_graph_entry(trace);

	/* Way too many functions traced: assume a hang and bail out */
	ftrace_graph_stop();
	printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
	if (ftrace_dump_on_oops)
		__ftrace_dump(false);
	return 0;
}

276 277 278 279 280 281 282 283 284 285 286
/*
 * Pretty much the same than for the function tracer from which the selftest
 * has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
					struct trace_array *tr)
{
	int ret;
	unsigned long count;

287 288 289 290 291
	/*
	 * Simulate the init() callback but we attach a watchdog callback
	 * to detect and recover from possible hangs
	 */
	tracing_reset_online_cpus(tr);
292
	set_graph_array(tr);
293 294
	ret = register_ftrace_graph(&trace_graph_return,
				    &trace_graph_entry_watchdog);
295 296 297 298
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}
299
	tracing_start_cmdline_record();
300 301 302 303

	/* Sleep for a 1/10 of a second */
	msleep(100);

304 305
	/* Have we just recovered from a hang? */
	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
306
		tracing_selftest_disabled = true;
307 308 309 310
		ret = -1;
		goto out;
	}

311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


S
Steven Rostedt 已提交
337 338 339 340 341 342 343 344 345
#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long saved_max = tracing_max_latency;
	unsigned long entries;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* generate an irqs-off latency */
	local_irq_disable();
	udelay(100);
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs off latencies.
	 */
	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &entries);
	trace->reset(tr);
	tracing_start();

	if (!ret && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = saved_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long saved_max = tracing_max_latency;
	unsigned long entries;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* generate a preempt-off latency */
	preempt_disable();
	udelay(100);
	preempt_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max preempt off latencies.
	 */
	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &entries);
	trace->reset(tr);
	tracing_start();

	if (!ret && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = saved_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long saved_max = tracing_max_latency;
	unsigned long entries;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptable,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out_no_start;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/*
	 * Stop the tracer to avoid a warning subsequent
	 * to buffer flipping failure because tracing_stop()
	 * disables the tr and max buffers, making flipping impossible
	 * in case of parallels max irqs/preempt off latencies.
	 */
	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &entries);
	if (ret)
		goto out;

	if (!entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	trace->start(tr);

	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	trace->stop(tr);

	/* stop the tracing. */
	tracing_stop();

	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &entries);

	if (!ret && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

out:
	tracing_start();
out_no_start:
	trace->reset(tr);
	tracing_max_latency = saved_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

S
Steven Noonan 已提交
549 550 551 552 553 554 555 556 557
#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* The nop tracer does nothing, so there is nothing to verify */
	return 0;
}
#endif

S
Steven Rostedt 已提交
558 559 560 561
#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Run as an RT task; a modest priority is all the test needs */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *done = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Signal that we now run at the new priority */
	complete(done);

	/* Sleep until the test wakes us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* Awake again; linger here until we are told to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
I
Ingo Molnar 已提交
599
	if (IS_ERR(p)) {
S
Steven Rostedt 已提交
600 601 602 603 604 605 606 607
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
608
	ret = tracer_init(trace, tr);
609 610 611 612 613
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

S
Steven Rostedt 已提交
614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes this is slightly racy. It is possible that for some
	 * strange reason that the RT thread we created, did not
	 * call schedule for 100ms after doing the completion,
	 * and we do a wakeup on a task that already is awake.
	 * But that is extremely unlikely, and the worst thing that
	 * happens in such a case, is that we disable tracing.
	 * Honestly, if this race does happen something is horrible
	 * wrong with the system.
	 */

	wake_up_process(p);

633 634 635
	/* give a little time to let the thread wake up */
	msleep(100);

S
Steven Rostedt 已提交
636
	/* stop the tracing. */
S
Steven Rostedt 已提交
637
	tracing_stop();
S
Steven Rostedt 已提交
638 639 640 641 642 643 644
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);


	trace->reset(tr);
S
Steven Rostedt 已提交
645
	tracing_start();
S
Steven Rostedt 已提交
646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long entries;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Let context switches accumulate for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &entries);
	trace->reset(tr);
	tracing_start();

	if (!ret && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
692 693 694 695 696 697 698 699 700

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long entries;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Let the profiler sample for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &entries);
	trace->reset(tr);
	tracing_start();

	if (!ret && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */
S
Steven Rostedt 已提交
724 725 726 727 728 729 730 731 732

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long entries;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Let branch events accumulate for 1/10 of a second */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &entries);
	trace->reset(tr);
	tracing_start();

	if (!ret && !entries) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */
756 757 758 759 760 761

#ifdef CONFIG_HW_BRANCH_TRACER
int
trace_selftest_startup_hw_branches(struct tracer *trace,
				   struct trace_array *tr)
{
	struct trace_iterator *iter;
	struct tracer tracer;
	unsigned long entries;
	int ret;

	if (!trace->open) {
		printk(KERN_CONT "missing open function...");
		return -1;
	}

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/*
	 * The hw-branch tracer needs to collect the trace from the various
	 * cpu trace buffers - before tracing is stopped.
	 */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	/* Use a private copy of the tracer for the iterator */
	memcpy(&tracer, trace, sizeof(tracer));

	iter->trace = &tracer;
	iter->tr = tr;
	iter->pos = -1;
	mutex_init(&iter->mutex);

	trace->open(iter);

	mutex_destroy(&iter->mutex);
	kfree(iter);

	tracing_stop();

	ret = trace_test_buffer(tr, &entries);
	trace->reset(tr);
	tracing_start();

	if (!ret && !entries) {
		printk(KERN_CONT "no entries found..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_HW_BRANCH_TRACER */
812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830

#ifdef CONFIG_KSYM_TRACER
static int ksym_selftest_dummy;

int
trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr)
{
	unsigned long entries;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	ksym_selftest_dummy = 0;

	/* Register the read-write tracing request */
	ret = process_new_ksym_entry("ksym_selftest_dummy",
				     HW_BREAKPOINT_R | HW_BREAKPOINT_W,
				     (unsigned long)(&ksym_selftest_dummy));
	if (ret < 0) {
		printk(KERN_CONT "ksym_trace read-write startup test failed\n");
		goto ret_path;
	}

	/*
	 * Perform a read and a write operation over the dummy variable
	 * to trigger the tracer.
	 */
	if (ksym_selftest_dummy == 0)
		ksym_selftest_dummy++;

	/* stop the tracing. */
	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &entries);
	trace->reset(tr);
	tracing_start();

	/*
	 * One read and one write were performed on the dummy variable,
	 * so exactly two entries are expected in the trace buffer.
	 */
	if (!ret && entries != 2) {
		printk(KERN_CONT "Ksym tracer startup test failed");
		ret = -1;
	}

ret_path:
	return ret;
}
#endif /* CONFIG_KSYM_TRACER */