/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"

static void tracing_start_function_trace(struct trace_array *tr);
static void tracing_stop_function_trace(struct trace_array *tr);
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs);
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs);
static struct tracer_flags func_flags;

/* Our option */
enum {
	TRACE_FUNC_OPT_STACK	= 0x1,
};

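/*
 * Allocate the per-instance ftrace_ops for a trace_array and point it
 * at the plain function_trace_call() callback.  The stack-tracing
 * callback is only installed later, via the func_stack_trace option.
 */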
static int allocate_ftrace_ops(struct trace_array *tr)
{
	struct ftrace_ops *ops;

	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return -ENOMEM;

	/* Currently only the non-stack version is supported */
	ops->func = function_trace_call;
	ops->flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_PID;

	tr->ops = ops;
	ops->private = tr;
	return 0;
}


int ftrace_create_function_files(struct trace_array *tr,
				 struct dentry *parent)
{
	int ret;

	/*
	 * The top level array uses the "global_ops", and the files are
	 * created on boot up.
	 */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return 0;

	ret = allocate_ftrace_ops(tr);
	if (ret)
		return ret;

	ftrace_create_filter_files(tr->ops, parent);

	return 0;
}

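/* Undo ftrace_create_function_files(): remove the filter files and free the instance ops. */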
void ftrace_destroy_function_files(struct trace_array *tr)
{
	ftrace_destroy_filter_files(tr->ops);
	kfree(tr->ops);
	tr->ops = NULL;
}

static int function_trace_init(struct trace_array *tr)
{
	ftrace_func_t func;

	/*
	 * Instance trace_arrays get their ops allocated at instance
	 * creation, unless that allocation failed.
	 */
	if (!tr->ops)
		return -ENOMEM;

	/* Currently only the global instance can do stack tracing */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL &&
	    func_flags.val & TRACE_FUNC_OPT_STACK)
		func = function_stack_trace_call;
	else
		func = function_trace_call;

	ftrace_init_array_ops(tr, func);

	tr->trace_buffer.cpu = get_cpu();
	put_cpu();

	tracing_start_cmdline_record();
	tracing_start_function_trace(tr);
	return 0;
}

static void function_trace_reset(struct trace_array *tr)
{
	tracing_stop_function_trace(tr);
	tracing_stop_cmdline_record();
	ftrace_reset_array_ops(tr);
}

static void function_trace_start(struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
}

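/*
 * Callback for the plain function tracer: log one function entry into
 * the ring buffer.  Recursion is blocked with the ftrace recursion
 * bits, and events are dropped while the per-CPU buffer is disabled.
 */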
static void
function_trace_call(unsigned long ip, unsigned long parent_ip,
		    struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	int bit;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	pc = preempt_count();
	preempt_disable_notrace();

	bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
	if (bit < 0)
		goto out;

	cpu = smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (!atomic_read(&data->disabled)) {
		local_save_flags(flags);
		trace_function(tr, ip, parent_ip, flags, pc);
	}
	trace_clear_recursion(bit);

 out:
	preempt_enable_notrace();
}

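/*
 * Like function_trace_call(), but also records a stack trace for each
 * entry.  IRQs are disabled and the per-CPU "disabled" counter serves
 * as the recursion guard here.
 */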
static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = op->private;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	if (unlikely(!tr->function_enabled))
		return;

	/*
	 * Need to use raw, since this must be called before the
	 * recursive protection is performed.
	 */
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);

	if (likely(disabled == 1)) {
		pc = preempt_count();
		trace_function(tr, ip, parent_ip, flags, pc);
		/*
		 * skip over 5 funcs:
		 *    __ftrace_trace_stack,
		 *    __trace_stack,
		 *    function_stack_trace_call
		 *    ftrace_list_func
		 *    ftrace_call
		 */
		__trace_stack(tr, flags, 5, pc);
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

static struct tracer_opt func_opts[] = {
#ifdef CONFIG_STACKTRACE
	{ TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
#endif
	{ } /* Always set a last empty entry */
};

static struct tracer_flags func_flags = {
	.val = 0, /* By default: all flags disabled */
	.opts = func_opts
};

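/*
 * tr->function_enabled is kept clear while the ops is being
 * (un)registered so the callbacks above ignore events during the
 * transition.
 */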
static void tracing_start_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	register_ftrace_function(tr->ops);
	tr->function_enabled = 1;
}

static void tracing_stop_function_trace(struct trace_array *tr)
{
	tr->function_enabled = 0;
	unregister_ftrace_function(tr->ops);
}

static struct tracer function_trace;

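/*
 * Toggle the func_stack_trace option: swapping the callback requires
 * unregistering the ops and registering it again with the new func.
 */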
static int
func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	switch (bit) {
	case TRACE_FUNC_OPT_STACK:
		/* do nothing if already set */
		if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK))
			break;

		/* We can change this flag when not running. */
		if (tr->current_trace != &function_trace)
			break;

		unregister_ftrace_function(tr->ops);

		if (set) {
			tr->ops->func = function_stack_trace_call;
			register_ftrace_function(tr->ops);
		} else {
			tr->ops->func = function_trace_call;
			register_ftrace_function(tr->ops);
		}

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct tracer function_trace __tracer_data =
{
	.name		= "function",
	.init		= function_trace_init,
	.reset		= function_trace_reset,
	.start		= function_trace_start,
	.flags		= &func_flags,
	.set_flag	= func_set_flag,
	.allow_instances = true,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function,
#endif
};

#ifdef CONFIG_DYNAMIC_FTRACE
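/*
 * Everything below implements the function-triggered commands that can
 * be written to set_ftrace_filter, e.g.:
 *
 *   echo 'schedule:traceoff:5' > set_ftrace_filter
 *
 * The optional ":count" limits how many times the action fires.
 */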
static void update_traceon_count(struct ftrace_probe_ops *ops,
				 unsigned long ip,
				 struct trace_array *tr, bool on,
				 void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;

	/*
	 * Tracing gets disabled (or enabled) once per count.
	 * This function can be called at the same time on multiple CPUs.
	 * It is fine if both disable (or enable) tracing, as disabling
	 * (or enabling) the second time doesn't do anything as the
	 * state of the tracer is already disabled (or enabled).
	 * What needs to be synchronized in this case is that the count
	 * only gets decremented once, even if the tracer is disabled
	 * (or enabled) twice, as the second one is really a nop.
	 *
	 * The memory barriers guarantee that we only decrement the
	 * counter once. First the count is read to a local variable
	 * and a read barrier is used to make sure that it is loaded
	 * before checking if the tracer is in the state we want.
	 * If the tracer is not in the state we want, then the count
	 * is guaranteed to be the old count.
	 *
	 * Next, the tracer is set to the state we want (disabled or
	 * enabled), then a write memory barrier is used to make sure
	 * that the new state is visible before the counter is updated
	 * to one less than the old count. This guarantees that another
	 * CPU executing this code will see the new state before seeing
	 * the new counter value, and would not do anything if the new
	 * counter is seen.
	 *
	 * Note, there is no synchronization between this and a user
	 * setting the tracing_on file. But we currently don't care
	 * about that.
	 */
	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
	old_count = *count;

	if (old_count <= 0)
		return;

	/* Make sure we see count before checking tracing state */
	smp_rmb();

	if (on == !!tracer_tracing_is_on(tr))
		return;

	if (on)
		tracer_tracing_on(tr);
	else
		tracer_tracing_off(tr);

	/* Make sure tracing state is visible before updating count */
	smp_wmb();

	*count = old_count - 1;
}

static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	update_traceon_count(ops, ip, tr, 1, data);
}

static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip,
		      struct trace_array *tr, struct ftrace_probe_ops *ops,
		      void *data)
{
	update_traceon_count(ops, ip, tr, 0, data);
}

static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip,
	       struct trace_array *tr, struct ftrace_probe_ops *ops,
	       void *data)
{
	if (tracer_tracing_is_on(tr))
		return;

	tracer_tracing_on(tr);
}

static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
		struct trace_array *tr, struct ftrace_probe_ops *ops,
		void *data)
{
	if (!tracer_tracing_is_on(tr))
		return;

	tracer_tracing_off(tr);
}

/*
 * Skip 4:
 *   ftrace_stacktrace()
 *   function_trace_probe_call()
 *   ftrace_ops_list_func()
 *   ftrace_call()
 */
#define STACK_SKIP 4

static __always_inline void trace_stack(struct trace_array *tr)
{
	unsigned long flags;
	int pc;

	local_save_flags(flags);
	pc = preempt_count();

	__trace_stack(tr, flags, STACK_SKIP, pc);
}

static void
ftrace_stacktrace(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	trace_stack(tr);
}

static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip,
			struct trace_array *tr, struct ftrace_probe_ops *ops,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count;
	long old_count;
	long new_count;

	if (!tracing_is_on())
		return;

	/* unlimited? */
	if (!mapper) {
		trace_stack(tr);
		return;
	}

	count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	/*
	 * Stack traces should only execute the number of times the
	 * user specified in the counter.
	 */
	do {
		old_count = *count;

		if (!old_count)
			return;

		new_count = old_count - 1;
		new_count = cmpxchg(count, old_count, new_count);
		if (new_count == old_count)
			trace_stack(tr);

		if (!tracing_is_on())
			return;

	} while (new_count != old_count);
}

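/*
 * Shared helper for the dump probes: returns 1 if the probe should
 * fire.  When a counter is attached it is decremented, and 0 is
 * returned once it is exhausted; no counter means unlimited.
 */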
static int update_count(struct ftrace_probe_ops *ops, unsigned long ip,
			void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count) {
		if (*count <= 0)
			return 0;
		(*count)--;
	}

	return 1;
}

static void
ftrace_dump_probe(unsigned long ip, unsigned long parent_ip,
		  struct trace_array *tr, struct ftrace_probe_ops *ops,
		  void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ALL);
}

/* Only dump the current CPU buffer. */
static void
ftrace_cpudump_probe(unsigned long ip, unsigned long parent_ip,
		     struct trace_array *tr, struct ftrace_probe_ops *ops,
		     void *data)
{
	if (update_count(ops, ip, data))
		ftrace_dump(DUMP_ORIG);
}

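/* seq_file output for a probe entry: "<func>:<name>:count=N" or "<func>:<name>:unlimited". */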
static int
ftrace_probe_print(const char *name, struct seq_file *m,
		   unsigned long ip, struct ftrace_probe_ops *ops,
		   void *data)
{
	struct ftrace_func_mapper *mapper = data;
	long *count = NULL;

	seq_printf(m, "%ps:%s", (void *)ip, name);

	if (mapper)
		count = (long *)ftrace_func_mapper_find_ip(mapper, ip);

	if (count)
		seq_printf(m, ":count=%ld\n", *count);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}

static int
ftrace_traceon_print(struct seq_file *m, unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data)
{
	return ftrace_probe_print("traceon", m, ip, ops, data);
}

static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("traceoff", m, ip, ops, data);
}

static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("stacktrace", m, ip, ops, data);
}

static int
ftrace_dump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("dump", m, ip, ops, data);
}

static int
ftrace_cpudump_print(struct seq_file *m, unsigned long ip,
			struct ftrace_probe_ops *ops, void *data)
{
	return ftrace_probe_print("cpudump", m, ip, ops, data);
}


static int
ftrace_count_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *init_data, void **data)
{
	struct ftrace_func_mapper *mapper = *data;

	if (!mapper) {
		mapper = allocate_ftrace_func_mapper();
		if (!mapper)
			return -ENOMEM;
		*data = mapper;
	}

	return ftrace_func_mapper_add_ip(mapper, ip, init_data);
}

static void
ftrace_count_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
		  unsigned long ip, void *data)
{
	struct ftrace_func_mapper *mapper = data;

	if (!ip) {
		free_ftrace_func_mapper(mapper, NULL);
		return;
	}

	ftrace_func_mapper_remove_ip(mapper, ip);
}

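/*
 * Ops that keep a per-ip counter share ftrace_count_init() and
 * ftrace_count_free() to allocate and release the counter mapping.
 */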
static struct ftrace_probe_ops traceon_count_probe_ops = {
	.func			= ftrace_traceon_count,
	.print			= ftrace_traceon_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops traceoff_count_probe_ops = {
	.func			= ftrace_traceoff_count,
	.print			= ftrace_traceoff_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops stacktrace_count_probe_ops = {
	.func			= ftrace_stacktrace_count,
	.print			= ftrace_stacktrace_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops dump_probe_ops = {
	.func			= ftrace_dump_probe,
	.print			= ftrace_dump_print,
	.init			= ftrace_count_init,
	.free			= ftrace_count_free,
};

static struct ftrace_probe_ops cpudump_probe_ops = {
	.func			= ftrace_cpudump_probe,
	.print			= ftrace_cpudump_print,
};

static struct ftrace_probe_ops traceon_probe_ops = {
	.func			= ftrace_traceon,
	.print			= ftrace_traceon_print,
};

static struct ftrace_probe_ops traceoff_probe_ops = {
	.func			= ftrace_traceoff,
	.print			= ftrace_traceoff_print,
};

static struct ftrace_probe_ops stacktrace_probe_ops = {
	.func			= ftrace_stacktrace,
	.print			= ftrace_stacktrace_print,
};

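/*
 * Common parser for the probe commands: a leading '!' unregisters the
 * probe, and an optional ":count" parameter is parsed into the probe
 * data pointer (unlimited when absent).
 */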
static int
ftrace_trace_probe_callback(struct trace_array *tr,
			    struct ftrace_probe_ops *ops,
			    struct ftrace_hash *hash, char *glob,
			    char *cmd, char *param, int enable)
{
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	if (glob[0] == '!')
		return unregister_ftrace_function_probe_func(glob+1, tr, ops);

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, tr, ops, count);

	return ret < 0 ? ret : 0;
}

static int
ftrace_trace_onoff_callback(struct trace_array *tr, struct ftrace_hash *hash,
			    char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	/* we register both traceon and traceoff to this callback */
	if (strcmp(cmd, "traceon") == 0)
		ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
	else
		ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_stacktrace_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;

	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   param, enable);
}

static int
ftrace_dump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &dump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static int
ftrace_cpudump_callback(struct trace_array *tr, struct ftrace_hash *hash,
			   char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;

	if (!tr)
		return -ENODEV;

	ops = &cpudump_probe_ops;

	/* Only dump once. */
	return ftrace_trace_probe_callback(tr, ops, hash, glob, cmd,
					   "1", enable);
}

static struct ftrace_func_command ftrace_traceon_cmd = {
	.name			= "traceon",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_traceoff_cmd = {
	.name			= "traceoff",
	.func			= ftrace_trace_onoff_callback,
};

static struct ftrace_func_command ftrace_stacktrace_cmd = {
	.name			= "stacktrace",
	.func			= ftrace_stacktrace_callback,
};

static struct ftrace_func_command ftrace_dump_cmd = {
	.name			= "dump",
	.func			= ftrace_dump_callback,
};

static struct ftrace_func_command ftrace_cpudump_cmd = {
	.name			= "cpudump",
	.func			= ftrace_cpudump_callback,
};

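/* Register all probe commands, unwinding the ones already registered on failure. */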
static int __init init_func_cmd_traceon(void)
{
	int ret;

	ret = register_ftrace_command(&ftrace_traceoff_cmd);
	if (ret)
		return ret;

	ret = register_ftrace_command(&ftrace_traceon_cmd);
	if (ret)
		goto out_free_traceoff;

	ret = register_ftrace_command(&ftrace_stacktrace_cmd);
	if (ret)
		goto out_free_traceon;

	ret = register_ftrace_command(&ftrace_dump_cmd);
	if (ret)
		goto out_free_stacktrace;

	ret = register_ftrace_command(&ftrace_cpudump_cmd);
	if (ret)
		goto out_free_dump;

	return 0;

 out_free_dump:
	unregister_ftrace_command(&ftrace_dump_cmd);
 out_free_stacktrace:
	unregister_ftrace_command(&ftrace_stacktrace_cmd);
 out_free_traceon:
	unregister_ftrace_command(&ftrace_traceon_cmd);
 out_free_traceoff:
	unregister_ftrace_command(&ftrace_traceoff_cmd);

	return ret;
}
#else
static inline int init_func_cmd_traceon(void)
{
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

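/* Register the probe commands (if any) and the "function" tracer itself. */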
__init int init_function_trace(void)
{
	init_func_cmd_traceon();
	return register_tracer(&function_trace);
}