hist.c 32.1 KB
Newer Older
1
#include "annotate.h"
2
#include "util.h"
3
#include "build-id.h"
4
#include "hist.h"
5 6
#include "session.h"
#include "sort.h"
7
#include <math.h>
8

9 10 11 12
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
13 14
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
15

16 17 18 19
/* Bit positions set in hist_entry->filtered to record why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

23 24
/* Global callchain rendering settings; tools override fields via options. */
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

29
/* Current display width of column 'col'. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

34
/* Unconditionally set the display width of column 'col' to 'len'. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

39
/* Widen column 'col' to at least 'len'; returns true if it actually grew. */
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len <= hists__col_len(hists, col))
		return false;

	hists__set_col_len(hists, col, len);
	return true;
}

48
static void hists__reset_col_len(struct hists *hists)
49 50 51 52
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
53
		hists__set_col_len(hists, col, 0);
54 55
}

56 57 58 59 60 61 62 63 64 65
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

66
/*
 * Grow the column widths of 'hists' so that every field of entry 'h'
 * (symbol, comm/thread, dso and - for branch samples - the from/to
 * symbol/dso pairs) fits when printed.
 */
static void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		/* +4: room for the '[x] ' privilege-level prefix. */
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	/* Thread column shows "comm:pid"-style text, hence the +6 slack. */
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 account of 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

117
/* Credit 'period' to the bucket matching the sample's CPU mode bits. */
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	if (cpumode == PERF_RECORD_MISC_KERNEL)
		he->period_sys += period;
	else if (cpumode == PERF_RECORD_MISC_USER)
		he->period_us += period;
	else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL)
		he->period_guest_sys += period;
	else if (cpumode == PERF_RECORD_MISC_GUEST_USER)
		he->period_guest_us += period;
	/* Other cpumode values are ignored on purpose. */
}

138 139 140 141 142 143 144 145
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
146 147 148
	u64 prev_period = he->period;

	if (prev_period == 0)
149
		return true;
150

151
	hist_entry__decay(he);
152 153 154 155

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

156 157 158
	return he->period == 0;
}

159 160
/*
 * Walk the output tree decaying every entry, dropping those that decayed
 * to zero (or match the zap flags) and are not in use by a browser.
 */
static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *nd = rb_first(&hists->entries);
	struct hist_entry *he;

	while (nd != NULL) {
		he = rb_entry(nd, struct hist_entry, rb_node);
		nd = rb_next(&he->rb_node);
		/*
		 * Entries still in use (e.g. being annotated) are kept;
		 * they get freed once the user stops browsing them and
		 * they eventually decay away completely.
		 */
		if (((zap_user && he->level == '.') ||
		     (zap_kernel && he->level != '.') ||
		     hists__decay_entry(hists, he)) &&
		    !he->used) {
			rb_erase(&he->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&he->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(he);
			--hists->nr_entries;
		}
	}
}

188
/* Single-threaded decay pass over the output tree. */
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, false);
}

193 194
/* Decay pass for the threaded case: also maintains the collapsed tree. */
void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, true);
}

199
/*
200
 * histogram, sorted on item, collects periods
201 202
 */

203 204
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
205
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
206
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);
207

208 209 210 211 212
	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
213
		if (symbol_conf.use_callchain)
214
			callchain_init(he->callchain);
215 216
	}

217
	return he;
218 219
}

220
static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
221
{
222
	if (!h->filtered) {
223 224
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
225
		hists->stats.total_period += h->period;
226
	}
227 228
}

229 230 231 232 233 234 235
/* With exclude_other set, entries lacking a parent symbol get filtered. */
static u8 symbol__parent_filter(const struct symbol *parent)
{
	return (symbol_conf.exclude_other && parent == NULL) ?
		1 << HIST_FILTER__PARENT : 0;
}

236 237
/*
 * Insert 'entry' into the current input tree (hists->entries_in), or merge
 * its period into an existing entry that compares equal.  Returns the
 * resident entry, or NULL when allocating a new one fails.  Serialized by
 * hists->lock against tree rotation in hists__get_rotate_entries_in().
 */
static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	/* Both the merged and the fresh entry get the cpumode breakdown. */
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337
/* Add/merge a histogram entry keyed on a branch sample's target. */
struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.thread	= al->thread,
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}

/* Add/merge a histogram entry for a regular (non-branch) sample. */
struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.thread	= al->thread,
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}

338 339 340 341 342 343 344
/* Compare two entries by each active sort key until one differentiates. */
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t result = se->se_cmp(left, right);

		if (result)
			return result;
	}

	return 0;
}

/* Like hist_entry__cmp(), but prefer each key's collapse comparator. */
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*cmp_fn)(struct hist_entry *, struct hist_entry *);
		int64_t result;

		cmp_fn = se->se_collapse ? se->se_collapse : se->se_cmp;

		result = cmp_fn(left, right);
		if (result)
			return result;
	}

	return 0;
}

/* Release an entry from hist_entry__new(); it is a single allocation. */
void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

381
/*
 * Insert 'he' into 'root', keyed by hist_entry__collapse().  If an equal
 * entry already exists, fold he's period/events (and callchain, when in
 * use) into it, free 'he' and return false; otherwise link 'he' into the
 * tree and return true.
 */
static bool hists__collapse_insert_entry(struct hists *hists,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&hists->callchain_cursor);
				callchain_merge(&hists->callchain_cursor, iter->callchain,
						he->callchain);
			}
			/* 'he' was merged: the caller must not touch it again. */
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

419
/*
 * Under hists->lock, hand out the current input tree and flip entries_in
 * to the other slot of the two-element entries_in_array, so collapsing
 * can consume one tree while new samples keep going into the other.
 */
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	/* Advance to the next slot, wrapping back to slot 0 past the end. */
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

434 435 436 437
/* Re-evaluate every filter kind against a freshly collapsed entry. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

441 442 443
/*
 * Drain the current input tree, merging equal-by-collapse entries into
 * hists->entries_collapsed.  A no-op when no sort key needs collapsing
 * and we are not in threaded mode.
 */
static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *he;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	nd = rb_first(root);

	while (nd) {
		he = rb_entry(nd, struct hist_entry, rb_node_in);
		nd = rb_next(&he->rb_node_in);

		rb_erase(&he->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, he)) {
			/*
			 * A brand new collapsed node: filters set by e.g. the
			 * hist_browser haven't been applied to it yet.
			 */
			hists__apply_filters(hists, he);
		}
	}
}
468

469 470 471 472 473 474 475 476
/* Single-threaded collapse pass. */
void hists__collapse_resort(struct hists *hists)
{
	__hists__collapse_resort(hists, false);
}

/* Collapse pass used when sample processing runs in a separate thread. */
void hists__collapse_resort_threaded(struct hists *hists)
{
	__hists__collapse_resort(hists, true);
}

/*
480
 * reverse the map, sort on period.
481 482
 */

483 484 485
/*
 * Link 'he' into the output tree, ordered by descending period, after
 * sorting/pruning its callchain against the min_callchain_hits threshold.
 */
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **link = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *cur;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	/* Bigger hitters go leftmost so iteration yields descending order. */
	while (*link != NULL) {
		parent = *link;
		cur = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > cur->period)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, link);
	rb_insert_color(&he->rb_node, entries);
}

509
/*
 * Rebuild the output tree from the collapsed (or raw input) tree,
 * resetting the entry count, total period and column widths as we go.
 */
static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *he;
	u64 min_callchain_hits;

	/* Absolute hit threshold implied by callchain_param.min_percent. */
	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	nd = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (nd) {
		he = rb_entry(nd, struct hist_entry, rb_node_in);
		nd = rb_next(&he->rb_node_in);

		__hists__insert_output_entry(&hists->entries, he, min_callchain_hits);
		hists__inc_nr_entries(hists, he);
	}
}
538

539 540 541 542 543 544 545 546
/* Single-threaded output resort. */
void hists__output_resort(struct hists *hists)
{
	__hists__output_resort(hists, false);
}

/* Output resort for the threaded case (always uses the collapsed tree). */
void hists__output_resort_threaded(struct hists *hists)
{
	__hists__output_resort(hists, true);
}
548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577

/* Emit the fixed 12-column lead-in plus 'left_margin' extra spaces. */
static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int written = fprintf(fp, "            ");
	int i;

	for (i = 0; i < left_margin; i++)
		written += fprintf(fp, " ");

	return written;
}

/* Print a separator line: a '|' for every depth level still open. */
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	size_t written = callchain__fprintf_left_margin(fp, left_margin);
	int i;

	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			written += fprintf(fp, "|          ");
		else
			written += fprintf(fp, "           ");
	}

	written += fprintf(fp, "\n");

	return written;
}

/*
 * Print one callchain graph line: depth pipes, an optional percentage at
 * the deepest column, then the symbol name (or the raw ip).
 * NOTE(review): callers pass a running list index as 'period', so the
 * percentage is only emitted for the first chain element - confirm.
 */
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, u64 hits,
				     int left_margin)
{
	int i;
	size_t ret = 0;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			double percent;

			percent = hits * 100.0 / total_samples;
			ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent);
		} else
			ret += fprintf(fp, "%s", "          ");
	}
	if (chain->ms.sym)
		ret += fprintf(fp, "%s\n", chain->ms.sym->name);
	else
		ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip);

	return ret;
}

/* "[...]" pseudo-symbol used to render pruned/remaining callchain hits. */
static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
619
	rem_hits.ms.sym = rem_sq_bracket;
620 621
}

622
/*
 * Recursively print the graph-mode callchain below 'root'; returns the
 * number of characters written.  In CHAIN_GRAPH_REL mode a trailing
 * "[...]" line accounts for hits pruned below the min_percent threshold.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      cumul,
						      left_margin);
		}

		/* Relative mode rescales children against this node's hits. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {

		/* init_rem_hits() may have failed; then we can't print "[...]". */
		if (!rem_sq_bracket)
			return ret;

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      remaining, left_margin);
	}

	return ret;
}

701
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
702 703
				       u64 total_samples, int left_margin)
{
704
	struct callchain_node *cnode;
705
	struct callchain_list *chain;
706
	u32 entries_printed = 0;
707
	bool printed = false;
708
	struct rb_node *node;
709
	int i = 0;
710
	int ret;
711

712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746
	/*
	 * If have one single callchain root, don't bother printing
	 * its percentage (100 % in fractal mode and the same percentage
	 * than the hist in graph mode). This also avoid one level of column.
	 */
	node = rb_first(root);
	if (node && !rb_next(node)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && sort__first_dimension == SORT_SYM)
				continue;
			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			if (chain->ms.sym)
				ret += fprintf(fp, " %s\n", chain->ms.sym->name);
			else
				ret += fprintf(fp, " %p\n", (void *)(long)chain->ip);

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		root = &cnode->rb_root;
747 748
	}

749 750
	return __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
751 752
}

753 754 755
/*
 * Print one flat-mode chain, ancestors first so the output reads from
 * root to leaf.  Returns the number of characters written.
 */
static size_t __callchain__fprintf_flat(FILE *fp,
					struct callchain_node *self,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t written = 0;

	if (self == NULL)
		return 0;

	written += __callchain__fprintf_flat(fp, self->parent, total_samples);

	list_for_each_entry(chain, &self->val, list) {
		/* Skip the special context marker entries. */
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		if (chain->ms.sym)
			written += fprintf(fp, "                %s\n", chain->ms.sym->name);
		else
			written += fprintf(fp, "                %p\n",
					(void *)(long)chain->ip);
	}

	return written;
}

779 780
/*
 * Print all chains in flat mode: a percentage line per chain followed by
 * the chain itself.  Returns the total number of characters written.
 *
 * Fixes: the percentage line used "ret =", overwriting the count
 * accumulated in previous loop iterations; it must accumulate.
 */
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct rb_node *rb_node;
	struct callchain_node *chain;

	rb_node = rb_first(self);
	while (rb_node) {
		double percent;

		chain = rb_entry(rb_node, struct callchain_node, rb_node);
		percent = chain->hit * 100.0 / total_samples;

		ret += percent_color_fprintf(fp, "           %6.2f%%\n", percent);
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830
/*
 * Dispatch callchain printing on the configured mode.  Relative graph
 * mode scales against the entry's own period, absolute mode against the
 * overall sample total.  Returns 0 for CHAIN_NONE or a bad mode.
 */
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, he->period,
						left_margin);
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						left_margin);
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

831 832 833 834 835 836 837 838 839 840
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
841 842
		if (!n->filtered)
			hists__calc_col_len(hists, n);
843 844 845 846
		next = rb_next(&n->rb_node);
	}
}

847
/*
 * Format the leading numeric columns for one entry into 's': overhead
 * percentage (or raw period when the total is zero), optional CPU
 * utilization split, sample/period counts and - when diffing against
 * 'pair_hists' - the delta and displacement columns.  Returns the number
 * of characters written.
 */
static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s,
				     size_t size, struct hists *pair_hists,
				     bool show_displacement, long displacement,
				     bool color, u64 total_period)
{
	u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
	u64 nr_events;
	const char *sep = symbol_conf.field_sep;
	int ret;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	/* In diff mode the baseline numbers come from the paired entry. */
	if (pair_hists) {
		period = he->pair ? he->pair->period : 0;
		nr_events = he->pair ? he->pair->nr_events : 0;
		total = pair_hists->stats.total_period;
		period_sys = he->pair ? he->pair->period_sys : 0;
		period_us = he->pair ? he->pair->period_us : 0;
		period_guest_sys = he->pair ? he->pair->period_guest_sys : 0;
		period_guest_us = he->pair ? he->pair->period_guest_us : 0;
	} else {
		period = he->period;
		nr_events = he->nr_events;
		total = total_period;
		period_sys = he->period_sys;
		period_us = he->period_us;
		period_guest_sys = he->period_guest_sys;
		period_guest_us = he->period_guest_us;
	}

	if (total) {
		if (color)
			ret = percent_color_snprintf(s, size,
						     sep ? "%.2f" : "   %6.2f%%",
						     (period * 100.0) / total);
		else
			ret = scnprintf(s, size, sep ? "%.2f" : "   %6.2f%%",
				       (period * 100.0) / total);
		if (symbol_conf.show_cpu_utilization) {
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_sys * 100.0) / total);
			ret += percent_color_snprintf(s + ret, size - ret,
					sep ? "%.2f" : "   %6.2f%%",
					(period_us * 100.0) / total);
			if (perf_guest) {
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_sys * 100.0) /
								total);
				ret += percent_color_snprintf(s + ret,
						size - ret,
						sep ? "%.2f" : "   %6.2f%%",
						(period_guest_us * 100.0) /
								total);
			}
		}
	} else
		/* No total to divide by: show the raw period instead. */
		ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period);

	if (symbol_conf.show_nr_samples) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
		else
			ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period);
		else
			ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period);
	}

	if (pair_hists) {
		char bf[32];
		double old_percent = 0, new_percent = 0, diff;

		if (total > 0)
			old_percent = (period * 100.0) / total;
		if (total_period > 0)
			new_percent = (he->period * 100.0) / total_period;

		diff = new_percent - old_percent;

		/* Deltas below 0.01% are shown as blank. */
		if (fabs(diff) >= 0.01)
			scnprintf(bf, sizeof(bf), "%+4.2F%%", diff);
		else
			scnprintf(bf, sizeof(bf), " ");

		if (sep)
			ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
		else
			ret += scnprintf(s + ret, size - ret, "%11.11s", bf);

		if (show_displacement) {
			if (displacement)
				scnprintf(bf, sizeof(bf), "%+4ld", displacement);
			else
				scnprintf(bf, sizeof(bf), " ");

			if (sep)
				ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf);
			else
				ret += scnprintf(s + ret, size - ret, "%6.6s", bf);
		}
	}

	return ret;
}

/*
 * Append every non-elided sort column of 'he' to 's', each preceded by
 * the field separator (or two spaces).  Returns the characters written.
 */
int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size,
			 struct hists *hists)
{
	const char *sep = symbol_conf.field_sep;
	struct sort_entry *se;
	int written = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;

		written += scnprintf(s + written, size - written, "%s",
				     sep ? sep : "  ");
		written += se->se_snprintf(he, s + written, size - written,
				       hists__col_len(hists, se->se_width_idx));
	}

	return written;
}

979 980 981 982
/*
 * Print one complete histogram line (numeric columns + sort columns) to
 * 'fp'.  'size' limits the line width; 0 means the full internal buffer.
 * Returns the fprintf() result.
 */
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       struct hists *hists, struct hists *pair_hists,
			       bool show_displacement, long displacement,
			       u64 total_period, FILE *fp)
{
	char bf[512];
	int ret;

	/* Clamp the requested width to the stack buffer. */
	if (size == 0 || size > sizeof(bf))
		size = sizeof(bf);

	ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists,
					show_displacement, displacement,
					true, total_period);
	hist_entry__snprintf(he, bf + ret, size - ret, hists);
	return fprintf(fp, "%s\n", bf);
}
996

997 998 999
/*
 * Print the callchain of 'he'.  When sorting by comm, the chain is
 * indented so it lines up under the comm column.
 */
static size_t hist_entry__fprintf_callchain(struct hist_entry *he,
					    struct hists *hists,
					    u64 total_period, FILE *fp)
{
	int left_margin = 0;

	if (sort__first_dimension == SORT_COMM) {
		struct sort_entry *se = list_first_entry(&hist_entry__sort_list,
							 typeof(*se), list);

		left_margin = hists__col_len(hists, se->se_width_idx) -
			      thread__comm_len(he->thread);
	}

	return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}

1013
/*
 * Print the whole histogram to 'fp': an optional header block, then one
 * line per unfiltered entry (plus its callchain when enabled), limited to
 * 'max_rows' rows and 'max_cols' columns (0 = no limit / default width).
 * When 'pair' is set, diff columns (Delta, optional Displacement) are
 * shown.  Returns the accumulated character count.
 * NOTE(review): several header fprintf() results are not added to 'ret',
 * so the returned size undercounts the header - confirm this is intended.
 */
size_t hists__fprintf(struct hists *hists, struct hists *pair,
		      bool show_displacement, bool show_header, int max_rows,
		      int max_cols, FILE *fp)
{
	struct sort_entry *se;
	struct rb_node *nd;
	size_t ret = 0;
	u64 total_period;
	unsigned long position = 1;
	long displacement = 0;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	const char *col_width = symbol_conf.col_width_list_str;
	int nr_rows = 0;

	init_rem_hits();

	if (!show_header)
		goto print_entries;

	fprintf(fp, "# %s", pair ? "Baseline" : "Overhead");

	if (symbol_conf.show_cpu_utilization) {
		if (sep) {
			ret += fprintf(fp, "%csys", *sep);
			ret += fprintf(fp, "%cus", *sep);
			if (perf_guest) {
				ret += fprintf(fp, "%cguest sys", *sep);
				ret += fprintf(fp, "%cguest us", *sep);
			}
		} else {
			ret += fprintf(fp, "     sys  ");
			ret += fprintf(fp, "      us  ");
			if (perf_guest) {
				ret += fprintf(fp, "  guest sys  ");
				ret += fprintf(fp, "  guest us  ");
			}
		}
	}

	if (symbol_conf.show_nr_samples) {
		if (sep)
			fprintf(fp, "%cSamples", *sep);
		else
			fputs("  Samples  ", fp);
	}

	if (symbol_conf.show_total_period) {
		if (sep)
			ret += fprintf(fp, "%cPeriod", *sep);
		else
			ret += fprintf(fp, "   Period    ");
	}

	if (pair) {
		if (sep)
			ret += fprintf(fp, "%cDelta", *sep);
		else
			ret += fprintf(fp, "  Delta    ");

		if (show_displacement) {
			if (sep)
				ret += fprintf(fp, "%cDisplacement", *sep);
			else
				ret += fprintf(fp, " Displ");
		}
	}

	/* Sort-key column headers; user-supplied widths are parsed here. */
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		if (se->elide)
			continue;
		if (sep) {
			fprintf(fp, "%c%s", *sep, se->se_header);
			continue;
		}
		width = strlen(se->se_header);
		if (symbol_conf.col_width_list_str) {
			if (col_width) {
				hists__set_col_len(hists, se->se_width_idx,
						   atoi(col_width));
				col_width = strchr(col_width, ',');
				if (col_width)
					++col_width;
			}
		}
		if (!hists__new_col_len(hists, se->se_width_idx, width))
			width = hists__col_len(hists, se->se_width_idx);
		fprintf(fp, "  %*s", width, se->se_header);
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	/* With a field separator there is no dotted underline row. */
	if (sep)
		goto print_entries;

	fprintf(fp, "# ........");
	if (symbol_conf.show_cpu_utilization)
		fprintf(fp, "   .......   .......");
	if (symbol_conf.show_nr_samples)
		fprintf(fp, " ..........");
	if (symbol_conf.show_total_period)
		fprintf(fp, " ............");
	if (pair) {
		fprintf(fp, " ..........");
		if (show_displacement)
			fprintf(fp, " .....");
	}
	list_for_each_entry(se, &hist_entry__sort_list, list) {
		unsigned int i;

		if (se->elide)
			continue;

		fprintf(fp, "  ");
		width = hists__col_len(hists, se->se_width_idx);
		if (width == 0)
			width = strlen(se->se_header);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

	fprintf(fp, "#\n");
	if (max_rows && ++nr_rows >= max_rows)
		goto out;

print_entries:
	total_period = hists->stats.total_period;

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (h->filtered)
			continue;

		/* How far the entry moved relative to the baseline ordering. */
		if (show_displacement) {
			if (h->pair != NULL)
				displacement = ((long)h->pair->position -
					        (long)position);
			else
				displacement = 0;
			++position;
		}
		ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement,
					   displacement, total_period, fp);

		if (symbol_conf.use_callchain)
			ret += hist_entry__fprintf_callchain(h, hists, total_period, fp);
		if (max_rows && ++nr_rows >= max_rows)
			goto out;

		/* Unresolved map in verbose mode: dump the thread's maps. */
		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(&h->thread->mg,
						   MAP__FUNCTION, verbose, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}
out:
	free(rem_sq_bracket);

	return ret;
}
1180

1181 1182 1183
/*
 * See hists__fprintf to match the column widths
 */
1184
unsigned int hists__sort_list_width(struct hists *hists)
1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200
{
	struct sort_entry *se;
	int ret = 9; /* total % */

	if (symbol_conf.show_cpu_utilization) {
		ret += 7; /* count_sys % */
		ret += 6; /* count_us % */
		if (perf_guest) {
			ret += 13; /* count_guest_sys % */
			ret += 12; /* count_guest_us % */
		}
	}

	if (symbol_conf.show_nr_samples)
		ret += 11;

1201 1202 1203
	if (symbol_conf.show_total_period)
		ret += 13;

1204 1205
	list_for_each_entry(se, &hist_entry__sort_list, list)
		if (!se->elide)
1206
			ret += 2 + hists__col_len(hists, se->se_width_idx);
1207

1208 1209 1210
	if (verbose) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

1211 1212 1213
	return ret;
}

1214
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1215 1216 1217 1218 1219 1220
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

1221
	++hists->nr_entries;
1222
	if (h->ms.unfolded)
1223
		hists->nr_entries += h->nr_rows;
1224
	h->row_offset = 0;
1225 1226
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;
1227

1228
	hists__calc_col_len(hists, h);
1229 1230
}

1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	/* No DSO filter configured: nothing gets hidden. */
	if (hists->dso_filter == NULL)
		return false;

	/* Hide entries with no resolved map or a non-matching DSO. */
	if (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

1244
void hists__filter_by_dso(struct hists *hists)
1245 1246 1247
{
	struct rb_node *nd;

1248 1249 1250
	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);
1251

1252
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1253 1254 1255 1256 1257
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

1258
		if (hists__filter_entry_by_dso(hists, h))
1259 1260
			continue;

1261
		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
1262 1263 1264
	}
}

1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	/* No thread filter configured: nothing gets hidden. */
	if (hists->thread_filter == NULL)
		return false;

	/* Entries belonging to the filtered thread remain visible. */
	if (he->thread == hists->thread_filter)
		return false;

	he->filtered |= (1 << HIST_FILTER__THREAD);
	return true;
}

1277
void hists__filter_by_thread(struct hists *hists)
1278 1279 1280
{
	struct rb_node *nd;

1281 1282 1283
	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);
1284

1285
	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1286 1287
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

1288
		if (hists__filter_entry_by_thread(hists, h))
1289
			continue;
1290

1291
		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
1292 1293
	}
}
1294

1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	/* No symbol filter string configured: nothing gets hidden. */
	if (hists->symbol_filter_str == NULL)
		return false;

	/* Visible only when resolved and the name contains the substring. */
	if (he->ms.sym != NULL &&
	    strstr(he->ms.sym->name, hists->symbol_filter_str) != NULL)
		return false;

	he->filtered |= (1 << HIST_FILTER__SYMBOL);
	return true;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

1326
int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	struct map_symbol *ms = &he->ms;

	/* Delegate to the annotation layer using this entry's symbol/map. */
	return symbol__inc_addr_samples(ms->sym, ms->map, evidx, ip);
}

1331
int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	struct map_symbol *ms = &he->ms;

	/* Delegate to the annotation layer using this entry's symbol/map. */
	return symbol__annotate(ms->sym, ms->map, privsize);
}
1335

1336
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	/* Slot 0 is bumped on every call, so it holds the overall total. */
	hists->stats.nr_events[0]++;
	hists->stats.nr_events[type]++;
}

1342
size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
1343 1344 1345 1346 1347
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
1348
		const char *name;
1349

1350
		if (hists->stats.nr_events[i] == 0)
1351 1352 1353
			continue;

		name = perf_event__name(i);
1354
		if (!strcmp(name, "UNKNOWN"))
1355
			continue;
1356 1357

		ret += fprintf(fp, "%16s events: %10d\n", name,
1358
			       hists->stats.nr_events[i]);
1359 1360 1361 1362
	}

	return ret;
}