/* hist.c - histogram management for perf report/annotate */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

9 10 11 12 13 14
/* Reasons an entry may be hidden; one bit per filter in hist_entry->filtered. */
enum hist_filter {
	HIST_FILTER__DSO,	/* set by hists__filter_by_dso() on DSO mismatch */
	HIST_FILTER__THREAD,	/* set by hists__filter_by_thread() on thread mismatch */
	HIST_FILTER__PARENT,	/* set at birth when no parent symbol resolved (see symbol__parent_filter) */
};

15 16
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
17 18
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
19 20
};

21
u16 hists__col_len(struct hists *hists, enum hist_column col)
22
{
23
	return hists->col_len[col];
24 25
}

26
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
27
{
28
	hists->col_len[col] = len;
29 30
}

31
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
32
{
33 34
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
35 36 37 38 39
		return true;
	}
	return false;
}

40
static void hists__reset_col_len(struct hists *hists)
41 42 43 44
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
45
		hists__set_col_len(hists, col, 0);
46 47
}

48
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
49 50 51 52
{
	u16 len;

	if (h->ms.sym)
53
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen);
54 55 56
	else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

57
		if (hists__col_len(hists, HISTC_DSO) < unresolved_col_width &&
58 59
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
60
			hists__set_col_len(hists, HISTC_DSO,
61 62
					   unresolved_col_width);
	}
63 64

	len = thread__comm_len(h->thread);
65 66
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);
67 68 69

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
70
		hists__new_col_len(hists, HISTC_DSO, len);
71 72 73
	}
}

74 75
static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
76
{
77
	switch (cpumode) {
78
	case PERF_RECORD_MISC_KERNEL:
79
		self->period_sys += period;
80 81
		break;
	case PERF_RECORD_MISC_USER:
82
		self->period_us += period;
83 84
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
85
		self->period_guest_sys += period;
86 87
		break;
	case PERF_RECORD_MISC_GUEST_USER:
88
		self->period_guest_us += period;
89 90 91 92 93 94
		break;
	default:
		break;
	}
}

/*
 * histogram, sorted on item, collects periods
 */

99 100
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
101
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
102 103 104 105
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
106
		self->nr_events = 1;
107 108
		if (self->ms.map)
			self->ms.map->referenced = true;
109 110 111 112 113 114 115
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

116
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
117
{
118
	if (!h->filtered) {
119 120
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
121
	}
122 123
}

124 125 126 127 128 129 130
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

131
struct hist_entry *__hists__add_entry(struct hists *hists,
132
				      struct addr_location *al,
133
				      struct symbol *sym_parent, u64 period)
134
{
135
	struct rb_node **p = &hists->entries.rb_node;
136 137 138
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
139
		.thread	= al->thread,
140 141 142 143
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
A
Arun Sharma 已提交
144
		.cpu	= al->cpu,
145 146
		.ip	= al->addr,
		.level	= al->level,
147
		.period	= period,
148
		.parent = sym_parent,
149
		.filtered = symbol__parent_filter(sym_parent),
150 151 152 153 154 155 156 157 158 159
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
160 161
			he->period += period;
			++he->nr_events;
162
			goto out;
163 164 165 166 167 168 169 170
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

171
	he = hist_entry__new(&entry);
172 173 174
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
175 176
	rb_insert_color(&he->rb_node, &hists->entries);
	hists__inc_nr_entries(hists, he);
177
out:
178
	hist_entry__add_cpumode_period(he, al->cpumode, period);
179 180 181
	return he;
}

182 183 184 185 186 187 188
/*
 * Walk the configured sort keys and return the first non-zero
 * comparison between @left and @right (0 means same bucket).
 */
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t result = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		result = se->se_cmp(left, right);
		if (result)
			break;
	}

	return result;
}

/*
 * Like hist_entry__cmp(), but a sort key may supply a looser
 * se_collapse comparison used when merging entries.
 */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t result = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*cmp_fn)(struct hist_entry *, struct hist_entry *);

		cmp_fn = se->se_collapse ?: se->se_cmp;

		result = cmp_fn(left, right);
		if (result)
			break;
	}

	return result;
}

/* Release an entry that has been removed from all trees. */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists,
226 227
					 struct rb_root *root,
					 struct hist_entry *he)
228
{
229
	struct rb_node **p = &root->rb_node;
230 231 232 233 234 235 236 237 238 239 240
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
241
			iter->period += he->period;
242
			if (symbol_conf.use_callchain) {
243 244
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
245 246
						he->callchain);
			}
247
			hist_entry__free(he);
248
			return false;
249 250 251 252 253 254 255 256 257
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
258
	rb_insert_color(&he->rb_node, root);
259
	return true;
260 261
}

262
void hists__collapse_resort(struct hists *hists)
263
{
264
	struct rb_root tmp;
265 266 267 268 269 270
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

271
	tmp = RB_ROOT;
272 273 274
	next = rb_first(&hists->entries);
	hists->nr_entries = 0;
	hists__reset_col_len(hists);
275

276 277 278 279
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

280 281 282
		rb_erase(&n->rb_node, &hists->entries);
		if (hists__collapse_insert_entry(hists, &tmp, n))
			hists__inc_nr_entries(hists, n);
283
	}
284

285
	hists->entries = tmp;
286 287 288
}

/*
 * reverse the map, sort on period.
 */

292 293 294
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
295
{
296
	struct rb_node **p = &entries->rb_node;
297 298 299
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

300
	if (symbol_conf.use_callchain)
301
		callchain_param.sort(&he->sorted_chain, he->callchain,
302 303 304 305 306 307
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

308
		if (he->period > iter->period)
309 310 311 312 313 314
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
315
	rb_insert_color(&he->rb_node, entries);
316 317
}

318
void hists__output_resort(struct hists *hists)
319
{
320
	struct rb_root tmp;
321 322 323 324
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

325
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
326

327
	tmp = RB_ROOT;
328
	next = rb_first(&hists->entries);
329

330 331
	hists->nr_entries = 0;
	hists__reset_col_len(hists);
332

333 334 335 336
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

337
		rb_erase(&n->rb_node, &hists->entries);
338
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
339
		hists__inc_nr_entries(hists, n);
340
	}
341

342
	hists->entries = tmp;
343
}
344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373

/* Print the 12-space indent plus @left_margin extra spaces; returns chars written. */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int col;
	int printed = fprintf(fp, "            ");

	for (col = 0; col < left_margin; col++)
		printed += fprintf(fp, " ");

	return printed;
}

/*
 * Print one spacer line of the callchain graph: a '|' under every
 * still-open depth level, blanks elsewhere.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int level;
	size_t printed = callchain__fprintf_left_margin(fp, left_margin);

	for (level = 0; level < depth; level++) {
		if (depth_mask & (1 << level))
			printed += fprintf(fp, "|          ");
		else
			printed += fprintf(fp, "           ");
	}

	printed += fprintf(fp, "\n");

	return printed;
}

/*
 * Print one callchain graph row for @chain at @depth: the leading
 * pipes, a "--xx.xx%--" branch marker on a child's first row, and the
 * symbol name (or raw address when unresolved).
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int level;
	size_t printed = 0;

	printed += callchain__fprintf_left_margin(fp, left_margin);
	for (level = 0; level < depth; level++) {
		if (depth_mask & (1 << level))
			printed += fprintf(fp, "|");
		else
			printed += fprintf(fp, " ");
		if (!period && level == depth - 1) {
			double percent = hits * 100.0 / total_samples;

			printed += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			printed += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		printed += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		printed += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return printed;
}

/* Placeholder "[...]" symbol and its callchain slot, used for pruned hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
415
	rem_hits.ms.sym = rem_sq_bracket;
416 417 418 419 420 421 422 423 424 425 426 427 428 429
}

/*
 * Recursively print graph rows for the children of @self at @depth.
 * @depth_mask tracks which ancestor levels still need a '|' drawn.
 * Returns the number of characters written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t printed = 0;
	int row;
	uint entries_printed = 0;

	/* In relative mode percentages are against this subtree only. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		printed += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						       left_margin);
		row = 0;
		list_for_each_entry(chain, &child->val, list) {
			printed += ipchain__fprintf_graph(fp, chain, depth,
							  new_depth_mask, row++,
							  new_total,
							  cumul,
							  left_margin);
		}
		printed += __callchain__fprintf_graph(fp, child, new_total,
						      depth + 1,
						      new_depth_mask | (1 << depth),
						      left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {
		/* Pruned children: print a "[...]" branch for the leftovers. */
		if (!rem_sq_bracket)
			return printed;

		new_depth_mask &= ~(1 << (depth - 1));

		printed += ipchain__fprintf_graph(fp, &rem_hits, depth,
						  new_depth_mask, 0, new_total,
						  remaining, left_margin);
	}

	return printed;
}

/*
 * Print the top level of a callchain in graph mode, drawing the "---"
 * connector before the first printed symbol, then recurse.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int idx = 0;
	int len = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		/* First entry duplicates the symbol we sort on: skip it. */
		if (!idx++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			len += callchain__fprintf_left_margin(fp, left_margin);
			len += fprintf(fp, "|\n");
			len += callchain__fprintf_left_margin(fp, left_margin);
			len += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			len += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			len += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			len += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	len += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return len;
}

/*
 * Flat mode: print the chain from the root down, one indented symbol
 * (or raw address) per line, skipping context marker entries.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t printed = 0;

	if (!self)
		return 0;

	/* Ancestors first so the chain reads root -> leaf. */
	printed += callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			printed += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			printed += fprintf(fp, "                %p\n",
					(void *)(long)chain->ip);
	}

	return printed;
}

/*
 * Print every sorted callchain of @self in the configured mode,
 * prefixed with its percentage of @total_samples.
 */
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t printed = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			printed += percent_color_fprintf(fp, "           %6.2f%%\n",
							 percent);
			printed += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Falldown */
		case CHAIN_GRAPH_REL:
			printed += callchain__fprintf_graph(fp, chain, total_samples,
							    left_margin);
			/* fall through */
		case CHAIN_NONE:
		default:
			break;
		}
		printed += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return printed;
}

597
/*
 * Format one histogram line for @self into @s: the overhead percentage
 * (or raw period when the total is 0), optional sample count, optional
 * baseline delta/displacement when comparing against @pair_hists, then
 * every non-elided sort column.  Returns the number of chars written.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	/*
	 * When diffing, the leading numbers come from the baseline (pair)
	 * entry; missing pairs count as zero.
	 */
	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		nr_events = self->pair ? self->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		nr_events = self->nr_events;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* Deltas below 0.01% print as blank. */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

713 714 715
/* Format @self into a local buffer (with color) and print it to @fp. */
int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
{
	char bf[512];

	hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
}
723

724 725
static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
726 727 728
					    u64 session_total)
{
	int left_margin = 0;
729

730 731 732
	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
733
		left_margin = hists__col_len(hists, se->se_width_idx);
734
		left_margin -= thread__comm_len(self->thread);
735 736
	}

737 738
	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
739 740
}

741
size_t hists__fprintf(struct hists *hists, struct hists *pair,
742
		      bool show_displacement, FILE *fp)
743 744 745 746
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
747 748
	unsigned long position = 1;
	long displacement = 0;
749
	unsigned int width;
750
	const char *sep = symbol_conf.field_sep;
751
	const char *col_width = symbol_conf.col_width_list_str;
752 753 754

	init_rem_hits();

755 756
	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

757
	if (symbol_conf.show_nr_samples) {
758 759
		if (sep)
			fprintf(fp, "%cSamples", *sep);
760 761 762
		else
			fputs("  Samples  ", fp);
	}
763

764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781
	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "  sys  ");
			ret += fprintf(fp, "  us  ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys  ");
				ret += fprintf(fp, "  guest us  ");
			}
		}
	}

782 783 784 785 786 787 788 789 790 791 792 793 794 795
	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta    ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

796 797 798
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
799
		if (sep) {
800
			fprintf(fp, "%c%s", *sep, se->se_header);
801 802
			continue;
		}
803
		width = strlen(se->se_header);
804 805
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
806
				hists__set_col_len(hists, se->se_width_idx,
807 808 809 810
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
811 812
			}
		}
813 814
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
815
		fprintf(fp, "  %*s", width, se->se_header);
816 817 818
	}
	fprintf(fp, "\n");

819
	if (sep)
820 821 822 823 824
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
825 826 827 828 829
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
830 831 832 833 834 835 836
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
837
		width = hists__col_len(hists, se->se_width_idx);
838
		if (width == 0)
839
			width = strlen(se->se_header);
840 841 842 843
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

844
	fprintf(fp, "\n#\n");
845 846

print_entries:
847
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
848 849
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

850 851 852
		if (h->filtered)
			continue;

853 854 855 856 857 858 859 860
		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
					        (long)position);
			else
				displacement = 0;
			++position;
		}
861 862
		ret += hist_entry__fprintf(h, hists, pair, show_displacement,
					   displacement, fp, hists->stats.total_period);
863 864

		if (symbol_conf.use_callchain)
865 866
			ret += hist_entry__fprintf_callchain(h, hists, fp,
							     hists->stats.total_period);
867
		if (h->ms.map == NULL && verbose > 1) {
868
			__map_groups__fprintf_maps(&h->thread->mg,
869
						   MAP__FUNCTION, verbose, fp);
870 871
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
872 873 874 875 876 877
	}

	free(rem_sq_bracket);

	return ret;
}
878

879 880 881
/*
 * See hists__fprintf to match the column widths
 */
882
unsigned int hists__sort_list_width(struct hists *hists)
883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
901
			ret += 2 + hists__col_len(hists, se->se_width_idx);
902

903 904 905
	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

906 907 908
	return ret;
}

909
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
910 911 912 913 914 915
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

916
	++hists->nr_entries;
917
	if (h->ms.unfolded)
918
		hists->nr_entries += h->nr_rows;
919
	h->row_offset = 0;
920 921
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
922

923
	hists__calc_col_len(hists, h);
924 925
}

926
void hists__filter_by_dso(struct hists *hists, const struct dso *dso)
927 928 929
{
	struct rb_node *nd;

930 931 932
	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);
933

934
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
935 936 937 938 939 940 941 942 943 944
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

945
		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
946 947 948
	}
}

949
void hists__filter_by_thread(struct hists *hists, const struct thread *thread)
950 951 952
{
	struct rb_node *nd;

953 954 955
	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);
956

957
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
958 959 960 961 962 963
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}
964

965
		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
966 967
	}
}
968

969
/* Record a hit at @ip against @he's symbol for later annotation. */
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

974
/* Disassemble/annotate @he's symbol; see symbol__annotate(). */
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
978

979
/* Bump the aggregate event count (slot 0) and the per-type count. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}

985
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
986 987 988 989 990
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
991
		const char *name;
992

993
		if (hists->stats.nr_events[i] == 0)
994 995 996
			continue;

		name = perf_event__name(i);
997
		if (!strcmp(name, "UNKNOWN"))
998
			continue;
999 1000

		ret += fprintf(fp, "%16s events: %10d\n", name,
1001
			       hists->stats.nr_events[i]);
1002 1003 1004 1005
	}

	return ret;
}