// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
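/*
 * Illustrative sketch (not part of this file): the kill switch above is
 * meant to be polled from the hot paths that touch the return stack.
 * An architecture's entry hook typically bails out before pushing a
 * frame once the tracer is dead, roughly like this (the function name
 * and surrounding logic are per-arch and shown only as an example):
 *
 *	void prepare_ftrace_return(unsigned long self_addr,
 *				   unsigned long *parent,
 *				   unsigned long frame_pointer)
 *	{
 *		if (unlikely(ftrace_graph_is_dead()))
 *			return;
 *		...
 *	}
 */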

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};
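/*
 * These options are toggled at run time through the tracefs trace_options
 * file; an illustrative shell session (assuming tracefs is mounted at
 * /sys/kernel/tracing) would be:
 *
 *	# echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *	# echo nofuncgraph-overhead > /sys/kernel/tracing/trace_options
 */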

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq and others to fill
 * in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack
	 * of the current task.  Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used.  To support
	 * filtering out specific functions, it makes the index negative
	 * by subtracting a huge value (FTRACE_NOTRACE_DEPTH), so when
	 * ftrace sees a negative index it will ignore the record.  The
	 * index gets recovered when returning from the filtered function
	 * by adding FTRACE_NOTRACE_DEPTH back, and then recording
	 * continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}
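/*
 * Worked example of the notrace offset above (values illustrative):
 * with curr_ret_stack at 3, entering a function matched by
 * set_graph_notrace stores the new frame at index 4 but leaves
 * curr_ret_stack at 4 - FTRACE_NOTRACE_DEPTH, a large negative value.
 * The entry handlers see the negative index and skip recording, and
 * ftrace_return_to_handler() adds FTRACE_NOTRACE_DEPTH back on exit,
 * recovering index 4 so tracing continues normally.
 */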

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	trace.func = func;
	trace.depth = current->curr_ret_stack + 1;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		return -EBUSY;

	return ftrace_push_return_trace(ret, func, &trace.depth,
					frame_pointer, retp);
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
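/*
 * Minimal usage sketch for a stack unwinder (the slot-walking loop is
 * hypothetical; only ftrace_graph_ret_addr() and its zero-initialized
 * state variable are real):
 *
 *	int graph_idx = 0;	// carried across calls while unwinding
 *	unsigned long addr;
 *
 *	for_each_stack_slot(task, retp) {	// hypothetical iterator
 *		addr = ftrace_graph_ret_addr(task, &graph_idx, *retp, retp);
 *		// addr is now the original return address, even if the
 *		// slot held return_to_handler
 *	}
 */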

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
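/*
 * Illustrative: tracing_thresh is written through tracefs in
 * microseconds; with it set, entry events are suppressed in
 * trace_graph_entry() and only returns slower than the threshold are
 * recorded by the handler above. Example (path assumes a mounted
 * tracefs):
 *
 *	# echo 100 > /sys/kernel/tracing/tracing_thresh
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 */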

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}
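/*
 * For reference, with the default flags (CPU, overhead, duration) the
 * two header lines produced above look roughly like:
 *
 * # CPU  DURATION                  FUNCTION CALLS
 * # |     |   |                     |   |   |   |
 */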

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
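/*
 * Illustrative shell usage of the max_graph_depth file backed by the
 * fops above (registered in init_graph_tracefs() below); 0 means no
 * depth limit:
 *
 *	# echo 1 > /sys/kernel/tracing/max_graph_depth
 *	# echo 0 > /sys/kernel/tracing/max_graph_depth
 */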

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);