#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include "annotate.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

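/*
 * Default callchain parameters: render the graph with percentages
 * relative to the parent (CHAIN_GRAPH_REL), prune chains below 0.5%,
 * walk in callee order and key chains by function.  The perf tools
 * typically override these from command-line options.
 */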
struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

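/*
 * Per-column widths are grow-only (see hists__new_col_len() above) and
 * are recomputed from scratch only after hists__reset_col_len().
 */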
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

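/*
 * Exponential decay, used by perf-top style browsing to age entries
 * that stop receiving samples: each pass keeps 7/8 of the period,
 * e.g. 800 -> 700 -> 612 -> ... -> 0, at which point
 * hists__decay_entry() below reports the entry as fully decayed.
 */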
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

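/*
 * he->callchain is a zero-length array at the tail of struct
 * hist_entry, so the single zalloc() below allocates the entry and
 * its callchain root in one chunk when callchains are in use.
 */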
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (part of) an array allocated in
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries, so we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

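/*
 * Insert @entry into the hists->entries_in rbtree, keyed by the sort
 * criteria: a sample whose keys match an existing entry is folded into
 * it via he_stat__add_period(), otherwise a fresh hist_entry is
 * allocated with hist_entry__new() and linked in.
 */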
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and in the he_stat__add_period() function.
	 */
	he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
				cost, cost, 0);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = iter->he;
	struct mem_info *mx;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	if (ui__has_annotation()) {
		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
		if (err)
			goto out;

		mx = he->mem_info;
		err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
		if (err)
			goto out;
	}

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi, *bx;
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0);
	if (he == NULL)
		return -ENOMEM;

	if (ui__has_annotation()) {
		bx = he->branch_info;
		err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
		if (err)
			goto out;

		err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
		if (err)
			goto out;
	}

	hists__inc_nr_samples(&evsel->hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	int err;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	if (ui__has_annotation()) {
		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
		if (err)
			return err;
	}

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return hist_entry__append_callchain(he, sample);
}

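/*
 * The three iterator flavors: mem iterators resolve the data access of
 * a single sample, branch iterators fan the branch stack out into one
 * entry per branch record, and normal iterators add exactly one entry
 * per sample.
 */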
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry 		= iter_prepare_mem_entry,
	.add_single_entry 	= iter_add_single_mem_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry 		= iter_prepare_branch_entry,
	.add_single_entry 	= iter_add_single_branch_entry,
	.next_entry 		= iter_next_branch_entry,
	.add_next_entry 	= iter_add_next_branch_entry,
	.finish_entry 		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry 		= iter_prepare_normal_entry,
	.add_single_entry 	= iter_add_single_normal_entry,
	.next_entry 		= iter_next_nop_entry,
	.add_next_entry 	= iter_add_next_nop_entry,
	.finish_entry 		= iter_finish_normal_entry,
};

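/*
 * Feed one resolved sample through an iterator.  A tool's sample
 * handler would look roughly like this (a sketch; error handling
 * omitted):
 *
 *	struct hist_entry_iter iter = { .ops = &hist_iter_normal };
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample, max_stack);
 *
 * prepare_entry() and add_single_entry() run once, next_entry()/
 * add_next_entry() loop until the sample is exhausted, and
 * finish_entry() always runs so the iterator can release iter->priv.
 */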
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

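/*
 * hists->entries_in points into a two-slot array; rotating it under
 * hists->lock lets the collapse pass below drain one tree while new
 * samples keep being inserted into the other.
 */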
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

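/*
 * Merge entries that compare equal under the collapse keys (e.g. the
 * same symbol sampled in many threads when sorting by symbol only)
 * from the input tree into hists->entries_collapsed.
 */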
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

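/*
 * Rebuild hists->entries in display order and recompute stats and
 * column widths.  min_callchain_hits applies callchain_param.min_percent
 * to the total period: with a total of 10000 and the default 0.5%,
 * chains with fewer than 50 hits are pruned by callchain_param.sort().
 */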
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}

static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

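/*
 * nr_events[0] doubles as the running total across all types, since
 * the PERF_RECORD_* ids used to index the array start at 1.
 */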
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

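/*
 * In the "relative" percentage mode (see parse_filter_percentage()
 * below) percentages are computed against only the period of entries
 * that pass the current filters.
 */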
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

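/*
 * Option-parsing callback for the tools' percentage-mode switch
 * (presumably wired to a --percentage option); also reused by
 * perf_hist_config() below for the hist.percentage config key.
 */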
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}