/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include "util/list.h"
#include "util/cache.h"
#include "util/rbtree.h"
#include "util/symbol.h"
#include "util/string.h"

#include "perf.h"

#include "util/parse-options.h"
#include "util/parse-events.h"

#define SHOW_KERNEL	1
#define SHOW_USER	2
#define SHOW_HV		4

static char		const *input_name = "perf.data";
static char		*vmlinux = NULL;

static char		default_sort_order[] = "comm,dso";
static char		*sort_order = default_sort_order;

static int		input;
static int		show_mask = SHOW_KERNEL | SHOW_USER | SHOW_HV;

static int		dump_trace = 0;
#define dprintf(x...)	do { if (dump_trace) printf(x); } while (0)

static int		verbose;
static int		full_paths;

static unsigned long	page_size;
static unsigned long	mmap_window = 32;

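/*
 * Event records as they appear in the perf.data input file:
 */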
struct ip_event {
	struct perf_event_header header;
	__u64 ip;
	__u32 pid, tid;
	__u64 period;
};

struct mmap_event {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 start;
	__u64 len;
	__u64 pgoff;
	char filename[PATH_MAX];
};

struct comm_event {
	struct perf_event_header header;
	__u32 pid, tid;
	char comm[16];
};

struct fork_event {
	struct perf_event_header header;
	__u32 pid, ppid;
};

struct period_event {
	struct perf_event_header header;
	__u64 time;
	__u64 id;
	__u64 sample_period;
};

typedef union event_union {
	struct perf_event_header	header;
	struct ip_event			ip;
	struct mmap_event		mmap;
	struct comm_event		comm;
	struct fork_event		fork;
	struct period_event		period;
} event_t;

static LIST_HEAD(dsos);
static struct dso *kernel_dso;
static struct dso *vdso;

static void dsos__add(struct dso *dso)
{
	list_add_tail(&dso->node, &dsos);
}

static struct dso *dsos__find(const char *name)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		if (strcmp(pos->name, name) == 0)
			return pos;
	return NULL;
}

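/*
 * Look up a DSO by name, creating it and loading its symbols on first use:
 */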
static struct dso *dsos__findnew(const char *name)
{
	struct dso *dso = dsos__find(name);
	int nr;

	if (dso)
		return dso;

	dso = dso__new(name, 0);
	if (!dso)
		goto out_delete_dso;

	nr = dso__load(dso, NULL, verbose);
	if (nr < 0) {
		if (verbose)
			fprintf(stderr, "Failed to open: %s\n", name);
		goto out_delete_dso;
	}
	if (!nr && verbose) {
		fprintf(stderr,
		"No symbols found in: %s, maybe install a debug package?\n",
				name);
	}

	dsos__add(dso);

	return dso;

out_delete_dso:
	dso__delete(dso);
	return NULL;
}

static void dsos__fprintf(FILE *fp)
{
	struct dso *pos;

	list_for_each_entry(pos, &dsos, node)
		dso__fprintf(pos, fp);
}

static struct symbol *vdso__find_symbol(struct dso *dso, __u64 ip)
{
	return dso__find_symbol(kernel_dso, ip);
}

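/*
 * Load the kernel symbol table and set up the [kernel] and [vdso]
 * pseudo-DSOs:
 */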
static int load_kernel(void)
{
	int err;

	kernel_dso = dso__new("[kernel]", 0);
	if (!kernel_dso)
		return -1;

	err = dso__load_kernel(kernel_dso, vmlinux, NULL, verbose);
	if (err) {
		dso__delete(kernel_dso);
		kernel_dso = NULL;
	} else
		dsos__add(kernel_dso);

	vdso = dso__new("[vdso]", 0);
	if (!vdso)
		return -1;

	vdso->find_symbol = vdso__find_symbol;

	dsos__add(vdso);

	return err;
}

static char __cwd[PATH_MAX];
static char *cwd = __cwd;
static int cwdlen;

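/* Return the number of leading characters that pathname shares with the cwd: */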
static int strcommon(const char *pathname)
{
	int n = 0;

	while (pathname[n] == cwd[n] && n < cwdlen)
		++n;

	return n;
}

struct map {
	struct list_head node;
	__u64	 start;
	__u64	 end;
	__u64	 pgoff;
	__u64	 (*map_ip)(struct map *, __u64);
	struct dso	 *dso;
};

static __u64 map__map_ip(struct map *map, __u64 ip)
{
	return ip - map->start + map->pgoff;
}

static __u64 vdso__map_ip(struct map *map, __u64 ip)
{
	return ip;
}

static inline int is_anon_memory(const char *filename)
{
	return strcmp(filename, "//anon") == 0;
}

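/*
 * Build a map from a PERF_EVENT_MMAP record: shorten the pathname
 * relative to the cwd and point anonymous mappings at the per-pid
 * /tmp/perf-<pid>.map file:
 */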
static struct map *map__new(struct mmap_event *event)
{
	struct map *self = malloc(sizeof(*self));

	if (self != NULL) {
		const char *filename = event->filename;
		char newfilename[PATH_MAX];
		int anon;

		if (cwd) {
			int n = strcommon(filename);

			if (n == cwdlen) {
				snprintf(newfilename, sizeof(newfilename),
					 ".%s", filename + n);
				filename = newfilename;
			}
		}

		anon = is_anon_memory(filename);

		if (anon) {
			snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", event->pid);
			filename = newfilename;
		}

		self->start = event->start;
		self->end   = event->start + event->len;
		self->pgoff = event->pgoff;

		self->dso = dsos__findnew(filename);
		if (self->dso == NULL)
			goto out_delete;

		if (self->dso == vdso || anon)
			self->map_ip = vdso__map_ip;
		else
			self->map_ip = map__map_ip;
	}
	return self;
out_delete:
	free(self);
	return NULL;
}

static struct map *map__clone(struct map *self)
{
	struct map *map = malloc(sizeof(*self));

	if (!map)
		return NULL;

	memcpy(map, self, sizeof(*self));

	return map;
}

static int map__overlap(struct map *l, struct map *r)
{
	if (l->start > r->start) {
		struct map *t = l;
		l = r;
		r = t;
	}

	if (l->end > r->start)
		return 1;

	return 0;
}

static size_t map__fprintf(struct map *self, FILE *fp)
{
	return fprintf(fp, " %Lx-%Lx %Lx %s\n",
		       self->start, self->end, self->pgoff, self->dso->name);
}


struct thread {
	struct rb_node	 rb_node;
	struct list_head maps;
	pid_t		 pid;
	char		 *comm;
};

static struct thread *thread__new(pid_t pid)
{
	struct thread *self = malloc(sizeof(*self));

	if (self != NULL) {
		self->pid = pid;
		self->comm = malloc(32);
		if (self->comm)
			snprintf(self->comm, 32, ":%d", self->pid);
		INIT_LIST_HEAD(&self->maps);
	}

	return self;
}

static int thread__set_comm(struct thread *self, const char *comm)
{
	if (self->comm)
		free(self->comm);
	self->comm = strdup(comm);
	return self->comm ? 0 : -ENOMEM;
}

static size_t thread__fprintf(struct thread *self, FILE *fp)
{
	struct map *pos;
	size_t ret = fprintf(fp, "Thread %d %s\n", self->pid, self->comm);

	list_for_each_entry(pos, &self->maps, node)
		ret += map__fprintf(pos, fp);

	return ret;
}


static struct rb_root threads;
static struct thread *last_match;

static struct thread *threads__findnew(pid_t pid)
{
	struct rb_node **p = &threads.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;

	/*
	 * Front-end cache - PID lookups come in blocks,
	 * so most of the time we don't have to look up
	 * the full rbtree:
	 */
	if (last_match && last_match->pid == pid)
		return last_match;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->pid == pid) {
			last_match = th;
			return th;
		}

		if (pid < th->pid)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	th = thread__new(pid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color(&th->rb_node, &threads);
		last_match = th;
	}

	return th;
}

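/*
 * Add a map to the thread, dropping any existing maps it overlaps:
 */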
static void thread__insert_map(struct thread *self, struct map *map)
{
	struct map *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, &self->maps, node) {
		if (map__overlap(pos, map)) {
			list_del_init(&pos->node);
			/* XXX leaks dsos */
			free(pos);
		}
	}

	list_add_tail(&map->node, &self->maps);
}

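/*
 * On fork, the child inherits its parent's comm and a copy of its maps:
 */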
static int thread__fork(struct thread *self, struct thread *parent)
{
	struct map *map;

	if (self->comm)
		free(self->comm);
	self->comm = strdup(parent->comm);
	if (!self->comm)
		return -ENOMEM;

	list_for_each_entry(map, &parent->maps, node) {
		struct map *new = map__clone(map);
		if (!new)
			return -ENOMEM;
		thread__insert_map(self, new);
	}

	return 0;
}

static struct map *thread__find_map(struct thread *self, __u64 ip)
{
	struct map *pos;

	if (self == NULL)
		return NULL;

	list_for_each_entry(pos, &self->maps, node)
		if (ip >= pos->start && ip <= pos->end)
			return pos;

	return NULL;
}

static size_t threads__fprintf(FILE *fp)
{
	size_t ret = 0;
	struct rb_node *nd;

	for (nd = rb_first(&threads); nd; nd = rb_next(nd)) {
		struct thread *pos = rb_entry(nd, struct thread, rb_node);

		ret += thread__fprintf(pos, fp);
	}

	return ret;
}

/*
 * histogram, sorted on item, collects counts
 */

static struct rb_root hist;

struct hist_entry {
	struct rb_node	 rb_node;

	struct thread	 *thread;
	struct map	 *map;
	struct dso	 *dso;
	struct symbol	 *sym;
	__u64		 ip;
	char		 level;

	__u64		 count;
};

/*
 * configurable sorting bits
 */

struct sort_entry {
	struct list_head list;

	char *header;

	int64_t (*cmp)(struct hist_entry *, struct hist_entry *);
	int64_t (*collapse)(struct hist_entry *, struct hist_entry *);
	size_t	(*print)(FILE *fp, struct hist_entry *);
};

/* --sort pid */

static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static size_t
sort__thread_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s:%5d", self->thread->comm ?: "", self->thread->pid);
}

static struct sort_entry sort_thread = {
	.header = "         Command:  Pid",
	.cmp	= sort__thread_cmp,
	.print	= sort__thread_print,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
	return right->thread->pid - left->thread->pid;
}

static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
	char *comm_l = left->thread->comm;
	char *comm_r = right->thread->comm;

	if (!comm_l || !comm_r) {
		if (!comm_l && !comm_r)
			return 0;
		else if (!comm_l)
			return -1;
		else
			return 1;
	}

	return strcmp(comm_l, comm_r);
}

static size_t
sort__comm_print(FILE *fp, struct hist_entry *self)
{
	return fprintf(fp, "%16s", self->thread->comm);
}

static struct sort_entry sort_comm = {
	.header		= "         Command",
	.cmp		= sort__comm_cmp,
	.collapse	= sort__comm_collapse,
	.print		= sort__comm_print,
};

/* --sort dso */

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct dso *dso_l = left->dso;
	struct dso *dso_r = right->dso;

	if (!dso_l || !dso_r) {
		if (!dso_l && !dso_r)
			return 0;
		else if (!dso_l)
			return -1;
		else
			return 1;
	}

	return strcmp(dso_l->name, dso_r->name);
}

static size_t
sort__dso_print(FILE *fp, struct hist_entry *self)
{
	if (self->dso)
		return fprintf(fp, "%-25s", self->dso->name);

	return fprintf(fp, "%016llx         ", (__u64)self->ip);
}

static struct sort_entry sort_dso = {
	.header = "Shared Object            ",
	.cmp	= sort__dso_cmp,
	.print	= sort__dso_print,
};

/* --sort symbol */

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
	__u64 ip_l, ip_r;

	if (left->sym == right->sym)
		return 0;

	ip_l = left->sym ? left->sym->start : left->ip;
	ip_r = right->sym ? right->sym->start : right->ip;

	return (int64_t)(ip_r - ip_l);
}

static size_t
sort__sym_print(FILE *fp, struct hist_entry *self)
{
	size_t ret = 0;

	if (verbose)
		ret += fprintf(fp, "%#018llx  ", (__u64)self->ip);

	if (self->sym) {
		ret += fprintf(fp, "[%c] %s",
			self->dso == kernel_dso ? 'k' : '.', self->sym->name);
	} else {
		ret += fprintf(fp, "%#016llx", (__u64)self->ip);
	}

	return ret;
}

static struct sort_entry sort_sym = {
	.header = "Symbol",
	.cmp	= sort__sym_cmp,
	.print	= sort__sym_print,
};

static int sort__need_collapse = 0;

struct sort_dimension {
	char			*name;
	struct sort_entry	*entry;
	int			taken;
};

static struct sort_dimension sort_dimensions[] = {
	{ .name = "pid",	.entry = &sort_thread,	},
	{ .name = "comm",	.entry = &sort_comm,	},
	{ .name = "dso",	.entry = &sort_dso,	},
	{ .name = "symbol",	.entry = &sort_sym,	},
};

static LIST_HEAD(hist_entry__sort_list);

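/*
 * Match a --sort token against the known dimensions (case-insensitive
 * prefix match) and append each one to the sort list at most once:
 */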
static int sort_dimension__add(char *tok)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sort_dimensions); i++) {
		struct sort_dimension *sd = &sort_dimensions[i];

		if (sd->taken)
			continue;

		if (strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sd->entry->collapse)
			sort__need_collapse = 1;

		list_add_tail(&sd->entry->list, &hist_entry__sort_list);
		sd->taken = 1;

		return 0;
	}

	return -ESRCH;
}

static int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

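/*
 * Like hist_entry__cmp(), but prefer each key's ->collapse function
 * when one is provided:
 */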
static int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->collapse ?: se->cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

static size_t
hist_entry__fprintf(FILE *fp, struct hist_entry *self, __u64 total_samples)
{
	struct sort_entry *se;
	size_t ret;

	if (total_samples) {
		double percent = self->count * 100.0 / total_samples;
		char *color = PERF_COLOR_NORMAL;

		/*
		 * We color high-overhead entries in red, mid-overhead
		 * entries in green - and keep the low overhead places
		 * normal:
		 */
		if (percent >= 5.0) {
			color = PERF_COLOR_RED;
		} else {
			if (percent >= 0.5)
				color = PERF_COLOR_GREEN;
		}

		ret = color_fprintf(fp, color, "   %6.2f%%",
				(self->count * 100.0) / total_samples);
	} else
		ret = fprintf(fp, "%12Ld ", self->count);

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		fprintf(fp, "  ");
		ret += se->print(fp, self);
	}

	ret += fprintf(fp, "\n");

	return ret;
}

/*
 * collect histogram counts
 */

static int
hist_entry__add(struct thread *thread, struct map *map, struct dso *dso,
		struct symbol *sym, __u64 ip, char level, __u64 count)
{
	struct rb_node **p = &hist.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
		.thread	= thread,
		.map	= map,
		.dso	= dso,
		.sym	= sym,
		.ip	= ip,
		.level	= level,
		.count	= count,
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
			he->count += count;
			return 0;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = malloc(sizeof(*he));
	if (!he)
		return -ENOMEM;
	*he = entry;
	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &hist);

	return 0;
}

static void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

static struct rb_root collapse_hists;

static void collapse__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &collapse_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->count += he->count;
			hist_entry__free(he);
			return;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &collapse_hists);
}

static void collapse__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	next = rb_first(&hist);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hist);
		collapse__insert_entry(n);
	}
}

/*
 * reverse the map, sort on count.
 */

static struct rb_root output_hists;

static void output__insert_entry(struct hist_entry *he)
{
	struct rb_node **p = &output_hists.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->count > iter->count)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, &output_hists);
}

static void output__resort(void)
{
	struct rb_node *next;
	struct hist_entry *n;
	struct rb_root *tree = &hist;

	if (sort__need_collapse)
		tree = &collapse_hists;

	next = rb_first(tree);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, tree);
		output__insert_entry(n);
	}
}

static size_t output__fprintf(FILE *fp, __u64 total_samples)
{
	struct hist_entry *pos;
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	fprintf(fp, "# (%Ld samples)\n", (__u64)total_samples);
	fprintf(fp, "#\n");

	fprintf(fp, "# Overhead");
	list_for_each_entry(se, &hist_entry__sort_list, list)
		fprintf(fp, "  %s", se->header);
	fprintf(fp, "\n");

	fprintf(fp, "# ........");
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int i;

		fprintf(fp, "  ");
		for (i = 0; i < strlen(se->header); i++)
			fprintf(fp, ".");
	}
	fprintf(fp, "\n");

	fprintf(fp, "#\n");

	for (nd = rb_first(&output_hists); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node);
		ret += hist_entry__fprintf(fp, pos, total_samples);
	}

	if (!strcmp(sort_order, default_sort_order)) {
		fprintf(fp, "#\n");
		fprintf(fp, "# (For more details, try: perf report --sort comm,dso,symbol)\n");
		fprintf(fp, "#\n");
	}
	fprintf(fp, "\n");

	return ret;
}

static void register_idle_thread(void)
{
	struct thread *thread = threads__findnew(0);

	if (thread == NULL ||
			thread__set_comm(thread, "[idle]")) {
		fprintf(stderr, "problem inserting idle task.\n");
		exit(-1);
	}
}

static unsigned long total = 0,
		     total_mmap = 0,
		     total_comm = 0,
		     total_fork = 0,
		     total_unknown = 0;

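/*
 * Resolve a sample to its thread, map, DSO and symbol, then account it
 * in the histogram:
 */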
static int
process_overflow_event(event_t *event, unsigned long offset, unsigned long head)
{
	char level;
	int show = 0;
	struct dso *dso = NULL;
	struct thread *thread = threads__findnew(event->ip.pid);
	__u64 ip = event->ip.ip;
	__u64 period = 1;
	struct map *map = NULL;

	if (event->header.type & PERF_SAMPLE_PERIOD)
		period = event->ip.period;

	dprintf("%p [%p]: PERF_EVENT (IP, %d): %d: %p period: %Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->header.misc,
		event->ip.pid,
		(void *)(long)ip,
		(long long)period);

	if (thread == NULL) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	dprintf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
		show = SHOW_KERNEL;
		level = 'k';

		dso = kernel_dso;

		dprintf(" ...... dso: %s\n", dso->name);

	} else if (event->header.misc & PERF_EVENT_MISC_USER) {

		show = SHOW_USER;
		level = '.';

		map = thread__find_map(thread, ip);
		if (map != NULL) {
			ip = map->map_ip(map, ip);
			dso = map->dso;
		} else {
			/*
			 * If this is outside of all known maps,
			 * and is a negative address, try to look it
			 * up in the kernel dso, as it might be a
			 * vsyscall (which executes in user-mode):
			 */
			if ((long long)ip < 0)
				dso = kernel_dso;
		}
		dprintf(" ...... dso: %s\n", dso ? dso->name : "<not found>");

	} else {
		show = SHOW_HV;
		level = 'H';
		dprintf(" ...... dso: [hypervisor]\n");
	}

	if (show & show_mask) {
		struct symbol *sym = NULL;

		if (dso)
			sym = dso->find_symbol(dso, ip);

		if (hist_entry__add(thread, map, dso, sym, ip, level, period)) {
			fprintf(stderr,
		"problem incrementing symbol count, skipping event\n");
			return -1;
		}
	}
	total += period;

	return 0;
}

static int
process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->mmap.pid);
	struct map *map = map__new(&event->mmap);

	dprintf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->mmap.pid,
		(void *)(long)event->mmap.start,
		(void *)(long)event->mmap.len,
		(void *)(long)event->mmap.pgoff,
		event->mmap.filename);

	if (thread == NULL || map == NULL) {
		dprintf("problem processing PERF_EVENT_MMAP, skipping event.\n");
		return 0;
	}

	thread__insert_map(thread, map);
	total_mmap++;

	return 0;
}

static int
process_comm_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->comm.pid);

	dprintf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->comm.comm, event->comm.pid);

	if (thread == NULL ||
	    thread__set_comm(thread, event->comm.comm)) {
		dprintf("problem processing PERF_EVENT_COMM, skipping event.\n");
		return -1;
	}
	total_comm++;

	return 0;
}

static int
process_fork_event(event_t *event, unsigned long offset, unsigned long head)
{
	struct thread *thread = threads__findnew(event->fork.pid);
	struct thread *parent = threads__findnew(event->fork.ppid);

	dprintf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->fork.pid, event->fork.ppid);

	if (!thread || !parent || thread__fork(thread, parent)) {
		dprintf("problem processing PERF_EVENT_FORK, skipping event.\n");
		return -1;
	}
	total_fork++;

	return 0;
}

static int
process_period_event(event_t *event, unsigned long offset, unsigned long head)
{
	dprintf("%p [%p]: PERF_EVENT_PERIOD: time:%Ld, id:%Ld: period:%Ld\n",
		(void *)(offset + head),
		(void *)(long)(event->header.size),
		event->period.time,
		event->period.id,
		event->period.sample_period);

	return 0;
}

static int
process_event(event_t *event, unsigned long offset, unsigned long head)
{
	if (event->header.misc & PERF_EVENT_MISC_OVERFLOW)
		return process_overflow_event(event, offset, head);

	switch (event->header.type) {
	case PERF_EVENT_MMAP:
		return process_mmap_event(event, offset, head);

	case PERF_EVENT_COMM:
		return process_comm_event(event, offset, head);

	case PERF_EVENT_FORK:
		return process_fork_event(event, offset, head);

	case PERF_EVENT_PERIOD:
		return process_period_event(event, offset, head);
	/*
	 * We don't process them right now but they are fine:
	 */

	case PERF_EVENT_THROTTLE:
	case PERF_EVENT_UNTHROTTLE:
		return 0;

	default:
		return -1;
	}

	return 0;
}

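/*
 * Main loop: mmap perf.data in a sliding mmap_window-sized window and
 * hand each record to process_event():
 */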
static int __cmd_report(void)
{
	int ret, rc = EXIT_FAILURE;
	unsigned long offset = 0;
	unsigned long head = 0;
	struct stat stat;
	event_t *event;
	uint32_t size;
	char *buf;

	register_idle_thread();

	input = open(input_name, O_RDONLY);
	if (input < 0) {
		fprintf(stderr, " failed to open file: %s", input_name);
		if (!strcmp(input_name, "perf.data"))
			fprintf(stderr, "  (try 'perf record' first)");
		fprintf(stderr, "\n");
		exit(-1);
	}

	ret = fstat(input, &stat);
	if (ret < 0) {
		perror("failed to stat file");
		exit(-1);
	}

	if (!stat.st_size) {
		fprintf(stderr, "zero-sized file, nothing to do!\n");
		exit(0);
	}

	if (load_kernel() < 0) {
		perror("failed to load kernel symbols");
		return EXIT_FAILURE;
	}

	if (!full_paths) {
		if (getcwd(__cwd, sizeof(__cwd)) == NULL) {
			perror("failed to get the current directory");
			return EXIT_FAILURE;
		}
		cwdlen = strlen(cwd);
	} else {
		cwd = NULL;
		cwdlen = 0;
	}
remap:
	buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
			   MAP_SHARED, input, offset);
	if (buf == MAP_FAILED) {
		perror("failed to mmap file");
		exit(-1);
	}

more:
	event = (event_t *)(buf + head);

	size = event->header.size;
	if (!size)
		size = 8;

	if (head + event->header.size >= page_size * mmap_window) {
		unsigned long shift = page_size * (head / page_size);
		int ret;

		ret = munmap(buf, page_size * mmap_window);
		assert(ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dprintf("%p [%p]: event: %d\n",
			(void *)(offset + head),
			(void *)(long)event->header.size,
			event->header.type);

	if (!size || process_event(event, offset, head) < 0) {

		dprintf("%p [%p]: skipping unknown header type: %d\n",
			(void *)(offset + head),
			(void *)(long)(event->header.size),
			event->header.type);

		total_unknown++;

		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */

		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head < stat.st_size)
		goto more;

	rc = EXIT_SUCCESS;
	close(input);

	dprintf("      IP events: %10ld\n", total);
	dprintf("    mmap events: %10ld\n", total_mmap);
	dprintf("    comm events: %10ld\n", total_comm);
	dprintf("    fork events: %10ld\n", total_fork);
	dprintf(" unknown events: %10ld\n", total_unknown);

	if (dump_trace)
		return 0;

	if (verbose >= 3)
		threads__fprintf(stdout);

	if (verbose >= 2)
		dsos__fprintf(stdout);

	collapse__resort();
	output__resort();
	output__fprintf(stdout, total);

	return rc;
}

static const char * const report_usage[] = {
	"perf report [<options>] <command>",
	NULL
};

static const struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_BOOLEAN('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "vmlinux", &vmlinux, "file", "vmlinux pathname"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol. Default: comm,dso"),
	OPT_BOOLEAN('P', "full-paths", &full_paths,
		    "Don't shorten the pathnames taking into account the cwd"),
	OPT_END()
};

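/*
 * Parse the --sort string into the ordered list of sort entries:
 */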
static void setup_sorting(void)
{
	char *tmp, *tok, *str = strdup(sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(tok) < 0) {
			error("Unknown --sort key: `%s'", tok);
			usage_with_options(report_usage, options);
		}
	}

	free(str);
}

int cmd_report(int argc, const char **argv, const char *prefix)
{
	symbol__init();

	page_size = getpagesize();

	argc = parse_options(argc, argv, options, report_usage, 0);

	setup_sorting();

	/*
	 * Any (unrecognized) arguments left?
	 */
	if (argc)
		usage_with_options(report_usage, options);

	setup_pager();

	return __cmd_report();
}