/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
#include "util/string.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char		const *input_name = "perf.data";
static char		*vmlinux = NULL;

static char		default_sort_order[] = "comm,dso";
static char		*sort_order = default_sort_order;
static char		*dso_list_str, *comm_list_str, *sym_list_str;
static struct strlist	*dso_list, *comm_list, *sym_list;

static int		input;
static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int		dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)
#define cdprintf(x...)	do { if (dump_trace) color_fprintf(stdout, color, x); } while (0)

static int		verbose;
#define eprintf(x...)	do { if (verbose) fprintf(stderr, x); } while (0)

static int		modules;

static int		full_paths;

static unsigned long	page_size;
static unsigned long	mmap_window = 32;

static char		default_parent_pattern[] = "^sys_|^do_page_fault";
static char		*parent_pattern = default_parent_pattern;
static regex_t		parent_regex;

static int		exclude_other = 1;
static int		callchain;
static enum chain_mode	callchain_mode;
static double		callchain_min_percent = 0.0;

static u64		sample_type;

struct ip_event {
	struct perf_event_header header;
	u64 ip;
	u32 pid, tid;
	unsigned char __more_data[];
};

struct mmap_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 start;
	u64 len;
	u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	u32 pid, ppid;
};

struct period_event {
	struct perf_event_header header;
	u64 time;
	u64 id;
	u64 sample_period;
};

struct lost_event {
	struct perf_event_header header;
	u64 id;
	u64 lost;
};

struct read_event {
	struct perf_event_header header;
	u32 pid, tid;
	u64 value;
	u64 format[3];
};

typedef union event_union {
	struct perf_event_header	header;
	struct ip_event			ip;
	struct mmap_event		mmap;
	struct comm_event		comm;
	struct fork_event		fork;
	struct period_event		period;
	struct lost_event		lost;
	struct read_event		read;
} event_t;

static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;
static struct dso *hypervisor_dso;

static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}

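/*
 * Find a DSO by name, creating, loading and registering it on first use.
 */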
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL, verbose);
	if (nr < 0) {
		eprintf("Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr)
		eprintf("No symbols found in: %s, maybe install a debug package?\n", name);

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}

static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}

static struct symbol *vdso__find_symbol(struct dso *dso, u64 ip)
{
	return dso__find_symbol(dso, ip);
}

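/*
 * Set up the [kernel], [vdso] and [hypervisor] pseudo-DSOs; failing to
 * load kernel symbols is tolerated, allocation failures are not.
 */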
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose, modules);
	if (err <= 0) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	vdso->find_symbol = vdso__find_symbol;

	dsos__add(vdso);

	hypervisor_dso = dso__new("[hypervisor]", 0);
	if (!hypervisor_dso)
		return -1;
	dsos__add(hypervisor_dso);

	return err;
}

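/*
 * Unless --full-paths is given, mmap'd pathnames are shortened relative to
 * the current working directory; strcommon() returns the length of the
 * prefix a pathname shares with the cwd.
 */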
static char __cwd[PATH_MAX];
static char *cwd = __cwd;
static int cwdlen;

static int strcommon(const char *pathname)
{
	int n = 0;

	while (pathname[n] == cwd[n] && n < cwdlen)
		++n;

	return n;
}

struct map {
	struct list_head node;
	u64	 start;
	u64	 end;
	u64	 pgoff;
	u64	 (*map_ip)(struct map *, u64);
	struct dso	 *dso;
};

static u64 map__map_ip(struct map *map, u64 ip)
{
	return ip - map->start + map->pgoff;
}

static u64 vdso__map_ip(struct map *map __used, u64 ip)
{
	return ip;
}

static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}

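/*
 * Build a map from a PERF_EVENT_MMAP record, shortening the filename and
 * redirecting anonymous mappings to /tmp/perf-<pid>.map.
 */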
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];
		int anon;

		if (cwd) {
			int n = strcommon(filename);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
			filename = newfilename;
		}

		self->start = event->start;
		self->end   = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		if (self->dso == vdso || anon)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

static struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

static int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}


struct thread {
	struct rb_node	 rb_node;
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
360 361 362 363
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}


static struct rb_root threads;
static struct thread *last_match;

static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}

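/*
 * Insert a map into the thread's map list, trimming or dropping any
 * existing maps it overlaps.
 */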
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			if (verbose >= 2) {
				printf("overlapping maps:\n");
				map__fprintf(map, stdout);
				map__fprintf(pos, stdout);
			}

			if (map->start <= pos->start && map->end > pos->start)
				pos->start = map->end;

			if (map->end >= pos->end && map->start < pos->end)
				pos->end = map->start;

			if (verbose >= 2) {
				printf("after collision:\n");
				map__fprintf(pos, stdout);
			}

			if (pos->start >= pos->end) {
				list_del_init(&pos->node);
				free(pos);
			}
		}
	}

	list_add_tail(&map->node, &self->maps);
}

static int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}

static struct map *thread__find_map(struct thread *self, u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

static size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node		rb_node;

	struct thread		*thread;
	struct map		*map;
	struct dso		*dso;
	struct symbol		*sym;
	struct symbol		*parent;
	u64			ip;
	char			level;
	struct callchain_node	callchain;
	struct rb_root		sorted_chain;

	u64			count;
519 520
};

521 522 523 524 525 526 527
/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t	(*print)(FILE *fp, struct hist_entry *);
};

static int64_t cmp_null(void *l, void *r)
{
	if (!l && !r)
		return 0;
	else if (!l)
		return -1;
	else
		return 1;
}

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "         Command:  Pid",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r)
		return cmp_null(comm_l, comm_r);

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= "         Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r)
		return cmp_null(dso_l, dso_r);

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, "%-25s", self->dso->name);

	return fprintf(fp, "%016llx         ", (u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object            ",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	u64 ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, "%#018llx  ", (u64)self->ip);

	if (self->sym) {
		ret += fprintf(fp, "[%c] %s",
			self->dso == kernel_dso ? 'k' :
			self->dso == hypervisor_dso ? 'h' : '.', self->sym->name);

		if (self->sym->module)
			ret += fprintf(fp, "\t[%s]", self->sym->module->name);
	} else {
		ret += fprintf(fp, "%#016llx", (u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};

/* --sort parent */

static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_l->name, sym_r->name);
}

static size_t
sort__parent_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	ret += fprintf(fp, "%-20s", self->parent ? self->parent->name : "[other]");

	return ret;
}

static struct sort_entry sort_parent = {
	.header = "Parent symbol       ",
	.cmp	= sort__parent_cmp,
	.print	= sort__parent_print,
};

static int sort__need_collapse = 0;
static int sort__has_parent = 0;

struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
	{ .name = "parent",	.entry = &sort_parent,	},
};

static LIST_HEAD(hist_entry__sort_list);

static int sort_dimension__add(char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				fprintf(stderr, "Invalid regex: %s\n%s",
					parent_pattern, err);
				exit(-1);
			}
			sort__has_parent = 1;
		}

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}

static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask)
{
	int i;
	size_t ret = 0;

	ret += fprintf(fp, "%s", "                ");

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}
static size_t
ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, int depth,
		       int depth_mask, int count, u64 total_samples,
		       int hits)
{
	int i;
	size_t ret = 0;

	ret += fprintf(fp, "%s", "                ");
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!count && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->sym)
		ret += fprintf(fp, "%s\n", chain->sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static size_t
callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
			u64 total_samples, int depth, int depth_mask)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	size_t ret = 0;
	int i;

	node = rb_first(&self->rb_root);
	while (node) {
		child = rb_entry(node, struct callchain_node, rb_node);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth
		 */
		next = rb_next(node);
		if (!next)
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line seperator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			if (chain->ip >= PERF_CONTEXT_MAX)
				continue;
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      child->cumul_hit);
		}
		ret += callchain__fprintf_graph(fp, child, total_samples,
						depth + 1,
						new_depth_mask | (1 << depth));
		node = next;
	}

	return ret;
}

static size_t
callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
			u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->sym)
			ret += fprintf(fp, "                %s\n", chain->sym->name);
		else
			ret += fprintf(fp, "                %p\n",
					(void *)(long)chain->ip);
	}

	return ret;
}

static size_t
hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
			      u64 total_samples)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		if (callchain_mode == FLAT) {
			ret += fprintf(fp, "           %6.2f%%\n", percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
		} else if (callchain_mode == GRAPH) {
			ret += callchain__fprintf_graph(fp, chain,
							total_samples, 1, 1);
		}
		ret += fprintf(fp, "\n");
		rb_node = rb_next(rb_node);
	}

	return ret;
}


static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, u64 total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (exclude_other && !self->parent)
		return 0;

	if (total_samples)
		ret = percent_color_fprintf(fp, "   %6.2f%%",
				(self->count * 100.0) / total_samples);
	else
		ret = fprintf(fp, "%12Ld ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (exclude_other && (se == &sort_parent))
			continue;

		fprintf(fp, "  ");
		ret += se->print(fp, self);
	}

	ret += fprintf(fp, "\n");

	if (callchain)
		hist_entry_callchain__fprintf(fp, self, total_samples);

	return ret;
}

/*
 * Resolve a sample address to its map, DSO and symbol.
 */

static struct symbol *
resolve_symbol(struct thread *thread, struct map **mapp,
	       struct dso **dsop, u64 *ipp)
{
	struct dso *dso = dsop ? *dsop : NULL;
	struct map *map = mapp ? *mapp : NULL;
	u64 ip = *ipp;

	if (!thread)
		return NULL;

	if (dso)
		goto got_dso;

	if (map)
		goto got_map;

	map = thread__find_map(thread, ip);
	if (map != NULL) {
		if (mapp)
			*mapp = map;
got_map:
		ip = map->map_ip(map, ip);

		dso = map->dso;
	} else {
		/*
		 * If this is outside of all known maps,
		 * and is a negative address, try to look it
		 * up in the kernel dso, as it might be a
		 * vsyscall (which executes in user-mode):
		 */
		if ((long long)ip < 0)
			dso = kernel_dso;
	}
	dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");
	dprintf(" ...... map: %Lx -> %Lx\n", *ipp, ip);
	*ipp  = ip;

	if (dsop)
		*dsop = dso;

	if (!dso)
		return NULL;
got_dso:
	return dso->find_symbol(dso, ip);
}

static int call__match(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

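/*
 * Walk the sampled callchain, honouring PERF_CONTEXT_* markers: resolve
 * each entry to a symbol, note the first --sort parent match and, when
 * callchain output is enabled, collect the symbols for append_chain().
 */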
static struct symbol **
resolve_callchain(struct thread *thread, struct map *map __used,
		    struct ip_callchain *chain, struct hist_entry *entry)
{
	u64 context = PERF_CONTEXT_MAX;
	struct symbol **syms = NULL;
	unsigned int i;

	if (callchain) {
		syms = calloc(chain->nr, sizeof(*syms));
		if (!syms) {
			fprintf(stderr, "Can't allocate memory for symbols\n");
			exit(-1);
		}
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct dso *dso = NULL;
		struct symbol *sym;

		if (ip >= PERF_CONTEXT_MAX) {
			context = ip;
			continue;
		}

		switch (context) {
		case PERF_CONTEXT_HV:
			dso = hypervisor_dso;
			break;
		case PERF_CONTEXT_KERNEL:
			dso = kernel_dso;
			break;
		default:
			break;
		}

		sym = resolve_symbol(thread, NULL, &dso, &ip);

		if (sym) {
			if (sort__has_parent && call__match(sym) &&
			    !entry->parent)
				entry->parent = sym;
			if (!callchain)
				break;
			syms[i] = sym;
		}
	}

	return syms;
}

/*
 * collect histogram counts
 */

static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, u64 ip, struct ip_callchain *chain,
		char level, u64 count)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct symbol **syms = NULL;
	struct hist_entry entry = {
		.thread	= thread,
		.map	= map,
		.dso	= dso,
		.sym	= sym,
		.ip	= ip,
		.level	= level,
		.count	= count,
		.parent = NULL,
		.sorted_chain = RB_ROOT
	};
	int cmp;

	if ((sort__has_parent || callchain) && chain)
		syms = resolve_callchain(thread, map, chain, &entry);

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count += count;
			if (callchain) {
				append_chain(&he->callchain, chain, syms);
				free(syms);
			}
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	if (callchain) {
		callchain_init(&he->callchain);
		append_chain(&he->callchain, chain, syms);
		free(syms);
	}
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}

/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he, u64 min_callchain_hits)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (callchain) {
		if (callchain_mode == FLAT)
			sort_chain_flat(&he->sorted_chain, &he->callchain,
					min_callchain_hits);
		else if (callchain_mode == GRAPH)
			sort_chain_graph(&he->sorted_chain, &he->callchain,
					 min_callchain_hits);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(u64 total_samples)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;
	u64 min_callchain_hits;

	min_callchain_hits = total_samples * (callchain_min_percent / 100);

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n, min_callchain_hits);
	}
}

static size_t output__fprintf(FILE *fp, u64 total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	fprintf(fp, "# (%Ld samples)\n", (u64)total_samples);
	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (exclude_other && (se == &sort_parent))
			continue;
		fprintf(fp, "  %s", se->header);
	}
	fprintf(fp, "\n");

	fprintf(fp, "# ........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (exclude_other && (se == &sort_parent))
			continue;

		fprintf(fp, "  ");
		for (i = 0; i < strlen(se->header); i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	if (sort_order == default_sort_order &&
			parent_pattern == default_parent_pattern) {
		fprintf(fp, "#\n");
		fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
		fprintf(fp, "#\n");
	}
	fprintf(fp, "\n");

	return ret;
}

static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
			thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}

static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0,
		     total_lost = 0;

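/*
 * Sanity check that the callchain length claimed by the sample fits
 * within the event record.
 */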
static int validate_chain(struct ip_callchain *chain, event_t *event)
{
	unsigned int chain_size;

	chain_size = event->header.size;
	chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;

	if (chain->nr*sizeof(u64) > chain_size)
		return -1;

	return 0;
}

static int
process_sample_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread = threads__findnew(event->ip.pid);
	u64 ip = event->ip.ip;
	u64 period = 1;
	struct map *map = NULL;
	void *more_data = event->ip.__more_data;
	struct ip_callchain *chain = NULL;
	int cpumode;

	if (sample_type & PERF_SAMPLE_PERIOD) {
		period = *(u64 *)more_data;
		more_data += sizeof(u64);
	}

	dprintf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid,
		(void *)(long)ip,
		(long long)period);

	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
		unsigned int i;

		chain = (void *)more_data;

		dprintf("... chain: nr:%Lu\n", chain->nr);

		if (validate_chain(chain, event) < 0) {
			eprintf("call-chain problem with event, skipping it.\n");
			return 0;
		}

		if (dump_trace) {
			for (i = 0; i < chain->nr; i++)
				dprintf("..... %2d: %016Lx\n", i, chain->ips[i]);
		}
	}

	if (thread == NULL) {
		eprintf("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (comm_list && !strlist__has_entry(comm_list, thread->comm))
		return 0;

	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;

	if (cpumode == PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';

		dso = kernel_dso;

		dprintf(" ...... dso: %s\n", dso->name);

	} else if (cpumode == PERF_EVENT_MISC_USER) {

		show = SHOW_USER;
		level = '.';

	} else {
		show = SHOW_HV;
		level = 'H';

		dso = hypervisor_dso;

		dprintf(" ...... dso: [hypervisor]\n");
	}

	if (show & show_mask) {
		struct symbol *sym = resolve_symbol(thread, &map, &dso, &ip);

		if (dso_list && dso && dso->name && !strlist__has_entry(dso_list, dso->name))
			return 0;

		if (sym_list && sym && !strlist__has_entry(sym_list, sym->name))
			return 0;

		if (hist_entry__add(thread, map, dso, sym, ip, chain, level, period)) {
			eprintf("problem incrementing symbol count, skipping event\n");
			return -1;
		}
	}
	total += period;

	return 0;
}

static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
1463
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->fork.pid, event->fork.ppid);

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}

static int
process_period_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->period.time,
		event->period.id,
		event->period.sample_period);

	return 0;
}

static int
process_lost_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->lost.id,
		event->lost.lost);

	total_lost += event->lost.lost;

	return 0;
}

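/*
 * Hex/ASCII dump of the raw event, emitted only with -D/--dump-raw-trace.
 */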
static void trace_event(event_t *event)
{
	unsigned char *raw_event = (void *)event;
	char *color = PERF_COLOR_BLUE;
	int i, j;

	if (!dump_trace)
		return;

	dprintf(".");
	cdprintf("\n. ... raw event: size %d bytes\n", event->header.size);

	for (i = 0; i < event->header.size; i++) {
		if ((i & 15) == 0) {
			dprintf(".");
			cdprintf("  %04x: ", i);
		}

		cdprintf(" %02x", raw_event[i]);

		if (((i & 15) == 15) || i == event->header.size-1) {
			cdprintf("  ");
			for (j = 0; j < 15-(i & 15); j++)
				cdprintf("   ");
			for (j = 0; j < (i & 15); j++) {
				if (isprint(raw_event[i-15+j]))
					cdprintf("%c", raw_event[i-15+j]);
				else
					cdprintf(".");
			}
			cdprintf("\n");
		}
	}
	dprintf(".\n");
}

static int
process_read_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_READ: %d %d %Lu\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->read.pid,
			event->read.tid,
			event->read.value);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	trace_event(event);

	switch (event->header.type) {
	case PERF_EVENT_SAMPLE:
		return process_sample_event(event, offset, head);

	case PERF_EVENT_MMAP:
		return process_mmap_event(event, offset, head);

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_FORK:
		return process_fork_event(event, offset, head);

	case PERF_EVENT_PERIOD:
		return process_period_event(event, offset, head);

	case PERF_EVENT_LOST:
		return process_lost_event(event, offset, head);

	case PERF_EVENT_READ:
		return process_read_event(event, offset, head);

	/*
	 * We don't process them right now but they are fine:
	 */

	case PERF_EVENT_THROTTLE:
	case PERF_EVENT_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}

static struct perf_header	*header;

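/*
 * All counters in the data file must share one sample_type; return it.
 */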
static u64 perf_header__sample_type(void)
{
	u64 sample_type = 0;
	int i;

	for (i = 0; i < header->attrs; i++) {
		struct perf_header_attr *attr = header->attr[i];

		if (!sample_type)
			sample_type = attr->attr.sample_type;
		else if (sample_type != attr->attr.sample_type)
			die("non matching sample_type");
	}

	return sample_type;
}

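/*
 * Read the perf.data file through a sliding mmap window, feed every record
 * to process_event() and finally sort and print the histogram.
 */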
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head, shift;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, "  (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	header = perf_header__read(input);
	head = header->data_offset;

	sample_type = perf_header__sample_type();

	if (sort__has_parent && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		fprintf(stderr, "selected --sort parent, but no callchain data\n");
		exit(-1);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}

	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		int ret;

		shift = page_size * (head / page_size);

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dprintf("\n%p [%p]: event: %d\n",
			(void *)(offset + head),
			(void *)(long)event->header.size,
			event->header.type);

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
1764
	}
1765

1766
	head += size;
I
Ingo Molnar 已提交
1767

1768
	if (offset + head >= header->data_offset + header->data_size)
1769 1770
		goto done;

1771
	if (offset + head < (unsigned long)stat.st_size)
1772 1773
		goto more;

1774
done:
1775 1776
	rc = EXIT_SUCCESS;
	close(input);
1777

I
Ingo Molnar 已提交
1778 1779 1780
	dprintf("      IP events: %10ld\n", total);
	dprintf("    mmap events: %10ld\n", total_mmap);
	dprintf("    comm events: %10ld\n", total_comm);
1781
	dprintf("    fork events: %10ld\n", total_fork);
1782
	dprintf("    lost events: %10ld\n", total_lost);
I
Ingo Molnar 已提交
1783
	dprintf(" unknown events: %10ld\n", total_unknown);
1784

I
Ingo Molnar 已提交
1785
	if (dump_trace)
1786 1787
		return 0;

1788 1789 1790
	if (verbose >= 3)
		threads__fprintf(stdout);

1791
	if (verbose >= 2)
1792 1793
		dsos__fprintf(stdout);

P
Peter Zijlstra 已提交
1794
	collapse__resort();
1795
	output__resort(total);
1796
	output__fprintf(stdout, total);
1797 1798 1799 1800

	return rc;
}

static int
parse_callchain_opt(const struct option *opt __used, const char *arg,
		    int unset __used)
{
	char *tok;
	char *endptr;

	callchain = 1;

	if (!arg)
		return 0;

	tok = strtok((char *)arg, ",");
	if (!tok)
		return -1;

	/* get the output mode */
	if (!strncmp(tok, "graph", strlen(arg)))
		callchain_mode = GRAPH;

	else if (!strncmp(tok, "flat", strlen(arg)))
		callchain_mode = FLAT;
	else
		return -1;

	/* get the min percentage */
	tok = strtok(NULL, ",");
	if (!tok)
		return 0;

	callchain_min_percent = strtod(tok, &endptr);
	if (tok == endptr)
		return -1;

	return 0;
}

static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_BOOLEAN('m', "modules", &modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_STRING('p', "parent", &parent_pattern, "regex",
		   "regex filter to identify parent, see: '--sort parent'"),
	OPT_BOOLEAN('x', "exclude-other", &exclude_other,
		    "Only display entries with parent-match"),
	OPT_CALLBACK_DEFAULT('c', "callchain", NULL, "output_type,min_percent",
		     "Display callchains using output_type and min percent threshold. "
		     "Default: flat,0", &parse_callchain_opt, "flat,100"),
	OPT_STRING('d', "dsos", &dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('C', "comms", &comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING('S', "symbols", &sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_END()
};

static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(report_usage, options);
		}
	}

	free(str);
}

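/*
 * Build a strlist from a comma separated -d/-C/-S filter argument.
 */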
static void setup_list(struct strlist **list, const char *list_str,
		       const char *list_name)
{
	if (list_str) {
		*list = strlist__new(true, list_str);
		if (!*list) {
			fprintf(stderr, "problems parsing %s list\n",
				list_name);
			exit(129);
		}
	}
}

int cmd_report(int argc, const char **argv, const char *prefix __used)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	if (parent_pattern != default_parent_pattern)
		sort_dimension__add("parent");
	else
		exclude_other = 0;

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_list(&dso_list, dso_list_str, "dso");
	setup_list(&comm_list, comm_list_str, "comm");
	setup_list(&sym_list, sym_list_str, "symbol");

	setup_pager();

	return __cmd_report();
}