// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

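/* Per-CPU state the output code keeps between events */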
struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

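/* Maximum depth to trace into; set through the "max_graph_depth" tracefs file below */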
unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task.  Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used.  To support
	 * filtering out specific functions, it makes the index
	 * negative by subtracting a huge value (FTRACE_NOTRACE_DEPTH),
	 * so when it sees a negative index, ftrace will ignore
	 * the record.  The index gets recovered when returning
	 * from the filtered function by adding FTRACE_NOTRACE_
	 * DEPTH back, and then tracing continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr(), which can be
	 * set from the set_graph_notrace file in tracefs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover the index to get the original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 *  is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	if (index < -1)
		index += FTRACE_NOTRACE_DEPTH;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)return_to_handler)
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */

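/* Write a function entry event into the ring buffer; returns 1 on success, 0 if no event could be reserved */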
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

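/* Returns non-zero if irq code should be skipped: funcgraph-irqs is off and we are in hard irq context */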
static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	/*
	 * Stop here if tracing_threshold is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

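/* Write a function return event into the ring buffer */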
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

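/* Used instead of trace_graph_return() when tracing_thresh is set: only record returns that lasted at least tracing_thresh */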
static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

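/* tracing_thresh changed: re-register the callbacks so the proper return handler is used */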
static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

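/* Width needed to print the largest possible CPU id (set in init_graph_trace()) */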
static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

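/*
 * Peek at the next event in the ring buffer: if it is the matching
 * return event for @curr, this call is a leaf, so hand back the
 * return entry and advance the iterator past it; otherwise NULL.
 */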
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

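/* Case of a nested (non-leaf) function on its call entry */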
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


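/* Main entry point of the output code: route each trace event to the matching graph printer */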
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

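/* Handlers for the "max_graph_depth" tracefs file created in init_graph_tracefs() */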
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);