/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4
#define TRACE_GRAPH_PRINT_PROC		0x8
#define TRACE_GRAPH_PRINT_DURATION	0x10
#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
#define TRACE_GRAPH_PRINT_IRQS		0x40

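/* Maximum call depth to trace; 0 means no limit (set via max_graph_depth) */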
static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns and proc by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs; the
 * following values are used by print_graph_irq and others to
 * fill in space in the DURATION column.
 */
enum {
	DURATION_FILL_FULL  = -1,
	DURATION_FILL_START = -2,
	DURATION_FILL_END   = -3,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index to ftrace return stack of
	 * current task.  Its value should be in [0, FTRACE_RETFUNC_
	 * DEPTH) when the function graph tracer is used.  To support
	 * filtering out specific functions, it makes the index
	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
	 * so when it sees a negative index the ftrace will ignore
	 * the record.  And the index gets recovered when returning
	 * from the filtered function by adding the FTRACE_NOTRACE_
	 * DEPTH and then it'll continue to record functions normally.
	 *
	 * The curr_ret_stack is initialized to -1 and get increased
	 * in this function.  So it can be less than -1 only if it was
	 * filtered out via ftrace_graph_notrace_addr() which can be
	 * set from set_graph_notrace file in debugfs by user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function.  Recover index to get an original
	 * return address.  See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have no where to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 optimized for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 *  is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;
	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

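/*
 * Returns non-zero when the current function should be ignored because
 * we are in hard irq context and the funcgraph-irqs option is turned off.
 */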
static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is nested in a traced function or is itself an enabled function. */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}

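/*
 * With tracing_thresh set, entry events are not recorded here; returns
 * slower than the threshold are reported by trace_graph_thresh_return().
 */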
int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

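/* Record a single function hit as a zero-duration entry/return pair */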
static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
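	/* Truncate the comm to at most 7 characters for the proc column */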
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}


static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

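/*
 * Peek at the next event in the ring buffer: if it is the matching
 * return event for @curr, this call is a leaf, so return that event;
 * otherwise return NULL.
 */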
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}

static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(DURATION_FILL_START, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(DURATION_FILL_END, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
			return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (duration) {
	case DURATION_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case DURATION_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an execution time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 msecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 msecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-interrupts option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it. Because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name.
	 */
	if (func_match) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(DURATION_FILL_FULL, s, flags);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}

void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	int cpu;

	iter->private = NULL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

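/* Keep ftrace_graph_skip_irqs in sync with the funcgraph-irqs option */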
static int func_graph_set_flag(u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.wait_pipe	= poll_wait_pipe,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


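/*
 * The "max_graph_depth" debugfs file: writing a value limits how deep
 * the tracer follows nested calls (0 means no limit), reading shows
 * the current limit.
 */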
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
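	/* Width needed to print the largest possible CPU number */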
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_ftrace_event(&graph_trace_entry_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_ftrace_event(&graph_trace_ret_event)) {
		pr_warning("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);