/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	/* Make sure the slot is reserved before we populate it */
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = index;

	return 0;
}
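
/*
 * Usage sketch (hedged; exact names are arch-specific - see each
 * arch's prepare_ftrace_return()): on function entry the mcount hook
 * pushes the real return address here, then rewrites the on-stack
 * return address to point at return_to_handler:
 *
 *	if (ftrace_push_return_trace(old, self_addr, &trace.depth,
 *				     frame_pointer) == -EBUSY) {
 *		*parent = old;	// stack full: leave the call untraced
 *		return;
 *	}
 */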

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/* Keep the stack entry alive until the return callback has run */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
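
/*
 * Note: this pairs with ftrace_push_return_trace(). The arch's
 * return_to_handler trampoline typically calls it on function exit to
 * recover the original return address once the exit event is recorded.
 */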

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs)
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* Trace it when it is nested in a traced function, or is itself enabled */
	if (!(trace->depth || ftrace_graph_addr(trace->func)) ||
	      ftrace_graph_ignore_irqs())
		return 0;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
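
/*
 * The ftrace_graph_addr() test above implements the set_graph_function
 * filter, e.g. from user space:
 *
 *	echo do_IRQ > /sys/kernel/debug/tracing/set_graph_function
 *
 * Once inside a filtered function (trace->depth != 0), nested calls
 * are traced as well.
 */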

int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!filter_current_check_discard(buffer, call, entry, event))
		ring_buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = tr->data[cpu];
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
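
/*
 * Illustration: with 16 possible CPUs, max_bytes_for_cpu is 2 and
 * CPU 3 is rendered as "  3) " (leading space, 2-wide id, paren).
 */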

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* Truncate the comm to keep the field compact */
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
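
/*
 * Illustration: for comm "sshd" and pid 1755 this emits "  sshd-1755   ",
 * the 9-character "comm-pid" pair centered in the 14-column field.
 */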

static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = iter->buffer_iter[iter->cpu];

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->tr->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
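
/*
 * Illustration (sketch, not real trace output): when an entry event is
 * immediately followed by its own return, the pair collapses into one
 * leaf line; otherwise the bracketed form is used:
 *
 *	 1)   0.616 us    |        kfree();		<- leaf
 *	 1)               |        do_fault() {	<- nested entry
 */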

/* Signal an execution-time overhead to the output */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	/* If the duration column is disabled, we don't need anything */
	if (!(flags & TRACE_GRAPH_PRINT_DURATION))
		return 1;

	/* Non-nested entry or return */
	if (duration == -1)
		return trace_seq_printf(s, "  ");

	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			return trace_seq_printf(s, "! ");

		/* Duration exceeded 10 usecs */
		if (duration > 10000ULL)
			return trace_seq_printf(s, "+ ");
	}

	return trace_seq_printf(s, "  ");
}
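
/*
 * Illustration: with funcgraph-overhead set, the duration column gets
 * a marker, "+ " past 10 usecs and "! " past 100 usecs, e.g.:
 *
 *	 1) + 12.345 us   |  }		(values illustrative)
 */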

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_printf(s, "==========>");
	else
		ret = trace_seq_printf(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Don't close the duration column if we don't have one */
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		trace_seq_printf(s, " |");
	ret = trace_seq_printf(s, "\n");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu",
			 nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_printf(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	int ret;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_printf(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
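
/*
 * Worked example: a 4520 ns duration becomes 4 us with a 520 ns
 * remainder after the do_div() above, printed as "4.520 us   "
 * (padded to keep the 7-character column aligned).
 */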

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead */
	ret = print_graph_overhead(duration, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_printf(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	struct fgraph_data *data = iter->private;
	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	if (flags & TRACE_GRAPH_PRINT_IRQS)
		return 0;

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	struct fgraph_data *data = iter->private;
	int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	if (flags & TRACE_GRAPH_PRINT_IRQS)
		return 0;

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
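
/*
 * Together, check_irq_entry() and check_irq_return() make a cleared
 * funcgraph-irqs option drop every event between an irq entry and its
 * matching return, so a hard interrupt landing mid-function leaves no
 * lines in the graph at all.
 */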

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead */
	ret = print_graph_overhead(duration, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Duration */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = print_graph_duration(duration, s);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_printf(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No overhead */
	ret = print_graph_overhead(-1, s, flags);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	if (flags & TRACE_GRAPH_PRINT_DURATION) {
		ret = trace_seq_printf(s, "            |  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_printf(s, " ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_printf(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_printf(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* dont trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| / _-=> lock-depth      \n", size, spaces);
	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "|||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "|||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}
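
/*
 * With the default flags (CPU, overhead and duration on), the two
 * header lines above come out as:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */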

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}
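
/*
 * Toggled from user space through the tracer options, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/tracing/options/funcgraph-irqs
 *
 * which clears TRACE_GRAPH_PRINT_IRQS and sets ftrace_graph_skip_irqs.
 */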

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __read_mostly = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);