/* util/hist.c — histogram collection, collapsing, sorting and printing. */
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

/* Bits set in hist_entry->filtered recording why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
};

/*
 * Global callchain rendering parameters: default to the relative graph
 * mode, pruning chains below 0.5% of their parent's hits.
 */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5
};

20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52
u16 hists__col_len(struct hists *self, enum hist_column col)
{
	return self->col_len[col];
}

/* Force the display width of column @col to exactly @len characters. */
void hists__set_col_len(struct hists *self, enum hist_column col, u16 len)
{
	self->col_len[col] = len;
}

/*
 * Widen column @col to @len if it is currently narrower.
 * Returns true when the stored width was actually updated.
 */
bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len)
{
	if (len <= hists__col_len(self, col))
		return false;

	hists__set_col_len(self, col, len);
	return true;
}

static void hists__reset_col_len(struct hists *self)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(self, col, 0);
}

static void hists__calc_col_len(struct hists *self, struct hist_entry *h)
{
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(self, HISTC_SYMBOL, h->ms.sym->namelen);
53 54 55 56 57 58 59 60 61
	else {
		const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

		if (hists__col_len(self, HISTC_DSO) < unresolved_col_width &&
		    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
		    !symbol_conf.dso_list)
			hists__set_col_len(self, HISTC_DSO,
					   unresolved_col_width);
	}
62 63 64 65 66 67 68 69 70 71 72

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(self, HISTC_COMM, len))
		hists__set_col_len(self, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(self, HISTC_DSO, len);
	}
}

73 74
static void hist_entry__add_cpumode_period(struct hist_entry *self,
					   unsigned int cpumode, u64 period)
75
{
76
	switch (cpumode) {
77
	case PERF_RECORD_MISC_KERNEL:
78
		self->period_sys += period;
79 80
		break;
	case PERF_RECORD_MISC_USER:
81
		self->period_us += period;
82 83
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
84
		self->period_guest_sys += period;
85 86
		break;
	case PERF_RECORD_MISC_GUEST_USER:
87
		self->period_guest_us += period;
88 89 90 91 92 93
		break;
	default:
		break;
	}
}

94
/*
95
 * histogram, sorted on item, collects periods
96 97
 */

98 99
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
100
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
101 102 103 104
	struct hist_entry *self = malloc(sizeof(*self) + callchain_size);

	if (self != NULL) {
		*self = *template;
105
		self->nr_events = 1;
106 107
		if (self->ms.map)
			self->ms.map->referenced = true;
108 109 110 111 112 113 114
		if (symbol_conf.use_callchain)
			callchain_init(self->callchain);
	}

	return self;
}

115
static void hists__inc_nr_entries(struct hists *self, struct hist_entry *h)
116
{
117 118 119 120
	if (!h->filtered) {
		hists__calc_col_len(self, h);
		++self->nr_entries;
	}
121 122
}

123 124 125 126 127 128 129
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

130 131
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
132
				      struct symbol *sym_parent, u64 period)
133
{
134
	struct rb_node **p = &self->entries.rb_node;
135 136 137
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct hist_entry entry = {
138
		.thread	= al->thread,
139 140 141 142
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
A
Arun Sharma 已提交
143
		.cpu	= al->cpu,
144 145
		.ip	= al->addr,
		.level	= al->level,
146
		.period	= period,
147
		.parent = sym_parent,
148
		.filtered = symbol__parent_filter(sym_parent),
149 150 151 152 153 154 155 156 157 158
	};
	int cmp;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__cmp(&entry, he);

		if (!cmp) {
159 160
			he->period += period;
			++he->nr_events;
161
			goto out;
162 163 164 165 166 167 168 169
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

170
	he = hist_entry__new(&entry);
171 172 173
	if (!he)
		return NULL;
	rb_link_node(&he->rb_node, parent, p);
174
	rb_insert_color(&he->rb_node, &self->entries);
175
	hists__inc_nr_entries(self, he);
176
out:
177
	hist_entry__add_cpumode_period(he, al->cpumode, period);
178 179 180
	return he;
}

181 182 183 184 185 186 187
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
188
		cmp = se->se_cmp(left, right);
189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
		if (cmp)
			break;
	}

	return cmp;
}

/*
 * Like hist_entry__cmp(), but a sort key may provide a dedicated
 * collapse comparator; fall back to se_cmp when it does not.
 */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t result = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*cmp_fn)(struct hist_entry *, struct hist_entry *);

		cmp_fn = se->se_collapse ?: se->se_cmp;
		result = cmp_fn(left, right);
		if (result)
			break;
	}

	return result;
}

/* Release an entry allocated by hist_entry__new(). */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

224 225 226
static bool hists__collapse_insert_entry(struct hists *self,
					 struct rb_root *root,
					 struct hist_entry *he)
227
{
228
	struct rb_node **p = &root->rb_node;
229 230 231 232 233 234 235 236 237 238 239
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
240
			iter->period += he->period;
241 242 243 244 245
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&self->callchain_cursor);
				callchain_merge(&self->callchain_cursor, iter->callchain,
						he->callchain);
			}
246
			hist_entry__free(he);
247
			return false;
248 249 250 251 252 253 254 255 256
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
257
	rb_insert_color(&he->rb_node, root);
258
	return true;
259 260
}

261
void hists__collapse_resort(struct hists *self)
262
{
263
	struct rb_root tmp;
264 265 266 267 268 269
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

270
	tmp = RB_ROOT;
271
	next = rb_first(&self->entries);
272
	self->nr_entries = 0;
273
	hists__reset_col_len(self);
274

275 276 277 278
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

279
		rb_erase(&n->rb_node, &self->entries);
280
		if (hists__collapse_insert_entry(self, &tmp, n))
281
			hists__inc_nr_entries(self, n);
282
	}
283

284
	self->entries = tmp;
285 286 287
}

/*
288
 * reverse the map, sort on period.
289 290
 */

291 292 293
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
294
{
295
	struct rb_node **p = &entries->rb_node;
296 297 298
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

299
	if (symbol_conf.use_callchain)
300
		callchain_param.sort(&he->sorted_chain, he->callchain,
301 302 303 304 305 306
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

307
		if (he->period > iter->period)
308 309 310 311 312 313
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
314
	rb_insert_color(&he->rb_node, entries);
315 316
}

317
void hists__output_resort(struct hists *self)
318
{
319
	struct rb_root tmp;
320 321 322 323
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

324
	min_callchain_hits = self->stats.total_period * (callchain_param.min_percent / 100);
325

326
	tmp = RB_ROOT;
327
	next = rb_first(&self->entries);
328

329
	self->nr_entries = 0;
330
	hists__reset_col_len(self);
331

332 333 334 335
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

336 337
		rb_erase(&n->rb_node, &self->entries);
		__hists__insert_output_entry(&tmp, n, min_callchain_hits);
338
		hists__inc_nr_entries(self, n);
339
	}
340

341
	self->entries = tmp;
342
}
343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372

/*
 * Print the fixed 12-column gutter followed by @left_margin extra
 * spaces; returns the number of characters written.
 */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int ret = fprintf(fp, "            ");
	int i;

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

/*
 * Print one spacer line of the callgraph: a '|' under every depth
 * level still open in @depth_mask, blanks elsewhere.
 */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);
	int i;

	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");
	}

	ret += fprintf(fp, "\n");

	return ret;
}

/*
 * Print one callchain line: the depth pipes, then either the branch
 * percentage (on the first line of a new branch) or padding, and
 * finally the symbol name, or the raw ip when unresolved.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		/* Only the first line of a branch shows its percentage. */
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "%p\n", (void *)(long)chain->ip);

	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
414
	rem_hits.ms.sym = rem_sq_bracket;
415 416 417 418 419 420 421 422 423 424 425 426 427 428
}

/*
 * Recursively print the children of @self as an ASCII graph, one
 * branch per child, limited by callchain_param.print_limit.  In
 * relative mode a trailing "[...]" entry accounts for hits that were
 * filtered out below the minimum percentage.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 new_total;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	/* Relative mode: percentages are against this node's children. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		new_total = self->children_hit;
	else
		new_total = total_samples;

	remaining = new_total;

	node = rb_first(&self->rb_root);
	while (node) {
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      new_total,
						      cumul,
						      left_margin);
		}
		/* Recurse into this child's own children. */
		ret += __callchain__fprintf_graph(fp, child, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != new_total) {

		/* init_rem_hits() may have failed; then skip the remainder. */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));

		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, new_total,
					      remaining, left_margin);
	}

	return ret;
}

/*
 * Print the top of a callchain graph: the nodes shared by every branch
 * (prefixed by the "---" connector), then recurse into the branches.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct callchain_node *self,
				       u64 total_samples, int left_margin)
{
	struct callchain_list *chain;
	bool printed = false;
	int i = 0;
	int ret = 0;
	u32 entries_printed = 0;

	list_for_each_entry(chain, &self->val, list) {
		/* When sorting by symbol the first node repeats the entry. */
		if (!i++ && sort__first_dimension == SORT_SYM)
			continue;

		if (!printed) {
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "|\n");
			ret += callchain__fprintf_left_margin(fp, left_margin);
			ret += fprintf(fp, "---");

			left_margin += 3;
			printed = true;
		} else
			ret += callchain__fprintf_left_margin(fp, left_margin);

		if (chain->ms.sym)
			ret += fprintf(fp, " %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	ret += __callchain__fprintf_graph(fp, self, total_samples, 1, 1, left_margin);

	return ret;
}

/*
 * Print a callchain in flat mode: recurse to the root first so the
 * chain is emitted root-to-leaf, skipping context marker pseudo-ips.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct callchain_node *self,
				      u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;

	if (!self)
		return 0;

	ret += callchain__fprintf_flat(fp, self->parent, total_samples);


	list_for_each_entry(chain, &self->val, list) {
		/* ips above PERF_CONTEXT_MAX are context markers, not code. */
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			ret += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			ret += fprintf(fp, "                %p\n",
					(void *)(long)chain->ip);
	}

	return ret;
}

/*
 * Print the sorted callchains attached to @self in the configured
 * callchain mode, limited by callchain_param.print_limit.  Returns the
 * number of characters written.
 */
static size_t hist_entry_callchain__fprintf(FILE *fp, struct hist_entry *self,
					    u64 total_samples, int left_margin)
{
	struct rb_node *rb_node;
	struct callchain_node *chain;
	size_t ret = 0;
	u32 entries_printed = 0;

	rb_node = rb_first(&self->sorted_chain);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;
		switch (callchain_param.mode) {
		case CHAIN_FLAT:
			ret += percent_color_fprintf(fp, "           %6.2f%%\n",
						     percent);
			ret += callchain__fprintf_flat(fp, chain, total_samples);
			break;
		case CHAIN_GRAPH_ABS: /* Fall through */
		case CHAIN_GRAPH_REL:
			ret += callchain__fprintf_graph(fp, chain, total_samples,
							left_margin);
			/*
			 * Explicit break: this used to fall through into
			 * CHAIN_NONE, harmless only while that case was empty.
			 */
			break;
		case CHAIN_NONE:
		default:
			break;
		}
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;
		rb_node = rb_next(rb_node);
	}

	return ret;
}

/*
 * Format one histogram line for @self into @s (at most @size bytes).
 * With @pair_hists the percentages come from the paired (baseline)
 * entry and a Delta column (plus optional Displacement) is appended.
 * Returns the number of characters written.
 */
int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
			 struct hists *hists, struct hists *pair_hists,
			 bool show_displacement, long displacement,
			 bool color, u64 session_total)
{
	struct sort_entry *se;
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !self->parent)
		return 0;

	/* Pick the numbers to print: baseline pair or the entry itself. */
	if (pair_hists) {
		period = self->pair ? self->pair->period : 0;
		nr_events = self->pair ? self->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = self->pair ? self->pair->period_sys : 0;
		period_us = self->pair ? self->pair->period_us : 0;
		period_guest_sys = self->pair ? self->pair->period_guest_sys : 0;
		period_guest_us = self->pair ? self->pair->period_guest_us : 0;
	} else {
		period = self->period;
		nr_events = self->nr_events;
		total = session_total;
		period_sys = self->period_sys;
		period_us = self->period_us;
		period_guest_sys = self->period_guest_sys;
		period_guest_us = self->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = snprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		/* No total to compute a percentage: print the raw period. */
		ret = snprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (session_total > 0)
			new_percent = (self->period * 100.0) / session_total;

		diff = new_percent - old_percent;

		/* Deltas below 0.01% print as blank. */
		if (fabs(diff) >= 0.01)
			snprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			snprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += snprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				snprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				snprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += snprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += snprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	/* One column per active sort key. */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		ret += snprintf(s + ret, size - ret, "%s", sep ?: "  ");
		ret += se->se_snprintf(self, s + ret, size - ret,
				       hists__col_len(hists, se->se_width_idx));
	}

	return ret;
}

712 713 714
int hist_entry__fprintf(struct hist_entry *self, struct hists *hists,
			struct hists *pair_hists, bool show_displacement,
			long displacement, FILE *fp, u64 session_total)
715 716
{
	char bf[512];
717
	hist_entry__snprintf(self, bf, sizeof(bf), hists, pair_hists,
718 719 720
			     show_displacement, displacement,
			     true, session_total);
	return fprintf(fp, "%s\n", bf);
721
}
722

723 724
static size_t hist_entry__fprintf_callchain(struct hist_entry *self,
					    struct hists *hists, FILE *fp,
725 726 727
					    u64 session_total)
{
	int left_margin = 0;
728

729 730 731
	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);
732
		left_margin = hists__col_len(hists, se->se_width_idx);
733
		left_margin -= thread__comm_len(self->thread);
734 735
	}

736 737
	return hist_entry_callchain__fprintf(fp, self, session_total,
					     left_margin);
738 739
}

/*
 * Print the whole histogram: column headers, a dotted separator, then
 * one line per entry plus optional callchains and (verbose) map dumps.
 *
 * NOTE(review): only some of the header fprintf() results are added
 * into @ret, so the returned size undercounts what was written —
 * confirm whether any caller relies on the exact count before fixing.
 */
size_t hists__fprintf(struct hists *self, struct hists *pair,
		      bool show_displacement, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;

	init_rem_hits();

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "  sys  ");
			ret += fprintf(fp, "  us  ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys  ");
				ret += fprintf(fp, "  guest us  ");
			}
		}
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta    ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/* One header per sort column, separated or width-padded. */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			/* Consume the next user-supplied width from the list. */
			if (col_width) {
				hists__set_col_len(self, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(self, se->se_width_idx, width))
			width = hists__col_len(self, se->se_width_idx);
		fprintf(fp, "  %*s", width, se->se_header);
	}
	fprintf(fp, "\n");

	/* Field-separated output gets no dotted underline. */
	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(self, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n#\n");

print_entries:
	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
					        (long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, self, pair, show_displacement,
					   displacement, fp, self->stats.total_period);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, self, fp,
							     self->stats.total_period);
		if (h->ms.map == NULL && verbose > 1) {
			/* Help diagnose unresolved samples. */
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(rem_sq_bracket);

	return ret;
}
874

875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898
/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *self)
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
			ret += 2 + hists__col_len(self, se->se_width_idx);

899 900 901
	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

902 903 904
	return ret;
}

/*
 * Clear @filter on @h and, once no other filter bit still applies,
 * account the entry back into @self's totals and column widths.
 */
static void hists__remove_entry_filter(struct hists *self, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++self->nr_entries;
	/* Unfolded entries also expose their callchain rows. */
	if (h->ms.unfolded)
		self->nr_entries += h->nr_rows;
	h->row_offset = 0;
	self->stats.total_period += h->period;
	self->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(self, h);
}

/*
 * Apply a DSO filter: entries not mapped to @dso (NULL accepts every
 * entry) get the DSO filter bit; the rest are re-accounted.  Totals
 * and column widths are rebuilt from scratch.
 */
void hists__filter_by_dso(struct hists *self, const struct dso *dso)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		/* --exclude-other entries are never re-accounted here. */
		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (dso != NULL && (h->ms.map == NULL || h->ms.map->dso != dso)) {
			h->filtered |= (1 << HIST_FILTER__DSO);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__DSO);
	}
}

/*
 * Apply a thread filter: entries not belonging to @thread (NULL
 * accepts every entry) get the thread filter bit; the rest are
 * re-accounted.  Totals and column widths are rebuilt from scratch.
 */
void hists__filter_by_thread(struct hists *self, const struct thread *thread)
{
	struct rb_node *nd;

	self->nr_entries = self->stats.total_period = 0;
	self->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(self);

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (thread != NULL && h->thread != thread) {
			h->filtered |= (1 << HIST_FILTER__THREAD);
			continue;
		}

		hists__remove_entry_filter(self, h, HIST_FILTER__THREAD);
	}
}
/* Record a sample hit at @ip for annotating @he's symbol. */
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

/* Disassemble/annotate @he's symbol, reserving @privsize per line. */
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
/* Bump the per-type event counter; slot 0 holds the grand total. */
void hists__inc_nr_events(struct hists *self, u32 type)
{
	++self->stats.nr_events[0];
	++self->stats.nr_events[type];
}

/*
 * Print one "<name> events: <count>" line per event type that was
 * seen, skipping types perf_event__name() does not know.  Returns the
 * number of characters written.
 */
size_t hists__fprintf_nr_events(struct hists *self, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (self->stats.nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		/* NOTE(review): %10d assumes nr_events[] elements are
		 * int-sized — confirm against the declaration in hist.h. */
		ret += fprintf(fp, "%16s events: %10d\n", name,
			       self->stats.nr_events[i]);
	}

	return ret;
}