/*
 *
 * Function graph tracer.
 * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/fs.h>

#include "trace.h"

/* Number of spaces added per nesting level in the call-graph output */
#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_OVERRUN	0x1
#define TRACE_GRAPH_PRINT_CPU		0x2
#define TRACE_GRAPH_PRINT_OVERHEAD	0x4

/*
 * User-togglable options, exposed through the tracer flags interface.
 * Each maps a "funcgraph-*" name to one TRACE_GRAPH_PRINT_* bit.
 */
static struct tracer_opt trace_opts[] = {
	/* Display overruns ? */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD,
	.opts = trace_opts
};

/*
 * pid on the last trace processed, per cpu.
 * -1 marks "no trace seen yet" so the very first event on a cpu
 * triggers the context-switch banner in verif_pid().
 */
static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };

static int graph_trace_init(struct trace_array *tr)
{
44 45
	int cpu, ret;

46 47 48
	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);

49
	ret = register_ftrace_graph(&trace_graph_return,
50
					&trace_graph_entry);
51 52 53 54 55
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
56 57 58 59
}

/* Tear down the tracer: mirror of graph_trace_init() in reverse order. */
static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

/*
 * Number of decimal digits needed to print @nb (a cpu id).
 * Used to right-align the cpu column. The original capped the
 * answer at 3 digits, which mis-aligned output on systems with
 * 1000 or more possible cpus; this version handles any width.
 */
static inline int log10_cpu(int nb)
{
	int digits = 1;

	while (nb >= 10) {
		nb /= 10;
		digits++;
	}
	return digits;
}

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int i;
	int ret;
	int log10_this = log10_cpu(cpu);
	int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));


82 83 84 85 86 87 88 89 90 91 92 93 94
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " ");

	/*
	 * Tricky - we space the CPU field according to the max
	 * number of online CPUs. On a 2-cpu system it would take
	 * a maximum of 1 digit - on a 128 cpu system it would
	 * take up to 3 digits:
	 */
95 96 97 98 99 100 101
	for (i = 0; i < log10_all - log10_this; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	ret = trace_seq_printf(s, "%d) ", cpu);
	if (!ret)
102 103
		return TRACE_TYPE_PARTIAL_LINE;

104 105 106 107
	return TRACE_TYPE_HANDLED;
}


108
/* If the pid changed since the last trace, output this event */
109
static int verif_pid(struct trace_seq *s, pid_t pid, int cpu)
110
{
111 112 113
	char *comm, *prev_comm;
	pid_t prev_pid;
	int ret;
114

115
	if (last_pid[cpu] != -1 && last_pid[cpu] == pid)
116
		return 1;
117

118
	prev_pid = last_pid[cpu];
119
	last_pid[cpu] = pid;
120

121
	comm = trace_find_cmdline(pid);
122
	prev_comm = trace_find_cmdline(prev_pid);
123

124 125 126 127 128 129 130 131 132 133 134 135 136 137 138
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	ret = trace_seq_printf(s,
		" ------------------------------------------\n");
	ret += trace_seq_printf(s, " | %d)  %s-%d  =>  %s-%d\n",
				  cpu, prev_comm, prev_pid, comm, pid);
	ret += trace_seq_printf(s,
		" ------------------------------------------\n\n");
	return ret;
139 140
}

141 142 143 144 145 146 147 148 149 150 151 152 153
static bool
trace_branch_is_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct ring_buffer_iter *ring_iter;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	ring_iter = iter->buffer_iter[iter->cpu];

	if (!ring_iter)
		return false;

154
	event = ring_buffer_iter_peek(ring_iter, NULL);
155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175

	if (!event)
		return false;

	next = ring_buffer_event_data(event);

	if (next->ent.type != TRACE_GRAPH_RET)
		return false;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return false;

	return true;
}


/*
 * Print @duration (nanoseconds) as "xxxx.yyy us |  ".
 * do_div() divides @duration by 1000 in place (it becomes whole
 * microseconds) and returns the remainder, the sub-microsecond part.
 * Returns the trace_seq_printf() result (0 on overflow).
 */
static inline int
print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	return trace_seq_printf(s, "%4llu.%3lu us |  ", duration, nsecs_rem);
}

/*
 * Signal an overhead of time execution to the output:
 * "! " when @duration (ns) exceeded 100 us, "+ " when it exceeded
 * 10 us, two plain spaces otherwise. Returns the trace_seq_printf()
 * result (0 on overflow).
 */
static int
print_graph_overhead(unsigned long long duration, struct trace_seq *s)
{
	const char *mark = "  ";

	if (duration > 100000ULL)		/* above 100 us */
		mark = "! ";
	else if (duration > 10000ULL)		/* above 10 us */
		mark = "+ ";

	return trace_seq_printf(s, "%s", mark);
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry, struct trace_seq *s)
{
	struct ftrace_graph_ret_entry *ret_entry;
	struct ftrace_graph_ret *graph_ret;
	struct ring_buffer_event *event;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	/*
	 * Consume (not just peek) the paired return event - the caller
	 * established via trace_branch_is_leaf() that it is next, and
	 * eating it here keeps it from being printed as a separate line.
	 */
	event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
	ret_entry = ring_buffer_event_data(event);
	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	/* Must not exceed 8 characters: 9999.999 us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function: indent by call depth, then "func();" on one line */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_printf(s, "();\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_entry_nested(struct ftrace_graph_ent_entry *entry,
			struct trace_seq *s)
{
	int i;
	int ret;
	struct ftrace_graph_ent *call = &entry->graph_ent;

	/* No overhead */
256 257 258 259 260 261 262
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = trace_seq_printf(s, "  ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* No time */
263
	ret = trace_seq_printf(s, "            |  ");
264 265

	/* Function */
266 267
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
268 269
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
270 271 272 273 274 275
	}

	ret = seq_print_ip_sym(s, call->func, 0);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

276
	ret = trace_seq_printf(s, "() {\n");
277 278 279
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

280 281 282
	return TRACE_TYPE_HANDLED;
}

283 284 285 286 287 288 289
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, int cpu)
{
	int ret;
	struct trace_entry *ent = iter->ent;

290
	/* Pid */
291 292 293
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

294 295 296 297 298 299
	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
300 301 302 303 304 305 306 307

	if (trace_branch_is_leaf(iter, field))
		return print_graph_entry_leaf(iter, field, s);
	else
		return print_graph_entry_nested(field, s);

}

/*
 * Print one function-return event: pid banner, optional cpu column,
 * overhead marker, duration and the closing "}" at the call depth,
 * plus an optional overrun count. Returns TRACE_TYPE_HANDLED or
 * TRACE_TYPE_PARTIAL_LINE on trace_seq overflow.
 */
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, int cpu)
{
	int i;
	int ret;
	unsigned long long duration = trace->rettime - trace->calltime;

	/* Must not exceed 8 characters: xxxx.yyy us */
	if (duration > 10000000ULL)
		duration = 9999999ULL;

	/* Pid */
	if (!verif_pid(s, ent->pid, cpu))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Cpu */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overhead */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) {
		ret = print_graph_overhead(duration, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Duration */
	ret = print_graph_duration(duration, s);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_printf(s, " ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "}\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overrun */
	if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}

enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
369

370 371 372 373
	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		struct ftrace_graph_ent_entry *field;
		trace_assign_type(field, entry);
374
		return print_graph_entry(field, s, iter,
375
					 iter->cpu);
376 377 378 379
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
380
		return print_graph_return(&field->ret, s, entry, iter->cpu);
381 382 383
	}
	default:
		return TRACE_TYPE_UNHANDLED;
384 385 386 387
	}
}

/* Tracer definition registered with the ftrace core. */
static struct tracer graph_trace __read_mostly = {
	.name	     = "function_graph",
	.init	     = graph_trace_init,
	.reset	     = graph_trace_reset,
	.print_line = print_graph_function,
	.flags		= &tracer_flags,
};

/* Register the function graph tracer at boot time. */
static __init int init_graph_trace(void)
{
	return register_tracer(&graph_trace);
}

device_initcall(init_graph_trace);