#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

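/*
 * Grow the column to @len if that is wider than its current width;
 * the return value tells the caller whether the column actually grew.
 */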
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

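/*
 * Unresolved entries are shown as raw hex addresses, which need one
 * character per hex digit of a pointer (BITS_PER_LONG / 4), so make
 * sure the DSO column can hold at least that, unless the user has
 * constrained the columns explicitly.
 */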
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

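/*
 * Grow every affected column so that this entry's symbol, comm, dso,
 * branch and mem info all fit when the histogram is rendered.
 */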
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

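/*
 * Recompute the column widths from scratch, considering only the
 * unfiltered entries that fall within the first @max_rows rows of
 * the display.
 */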
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

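/*
 * Attribute the period to the sys/user/guest bucket matching the
 * PERF_RECORD_MISC cpumode of the sample.
 */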
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

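/*
 * Exponential decay: scale the period down to 7/8 of its value on
 * each pass so that entries which stop getting samples fade away,
 * e.g. in the live perf top display.
 */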
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

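/*
 * Decay all entries; zap_user/zap_kernel additionally force out
 * entries of that privilege level (a '.' level means userspace),
 * whatever period they have left.
 */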
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

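/*
 * The callchain_root is carved out of the same allocation, right
 * behind the hist_entry itself, and only when callchains are in use.
 */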
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * machine__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

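/*
 * Walk the current input tree (entries_in): if an entry comparing
 * equal is found, just add the new period to it, otherwise link a
 * freshly allocated hist_entry where the walk ended.
 */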
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from machine__resolve_mem
			 * and will not be used anymore.
			 */
			free(entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	hists->nr_entries++;
	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al);
}

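/*
 * Compare two entries along the configured sort keys; the first key
 * that doesn't compare equal decides the order.
 */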
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he->mem_info);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

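/*
 * Returns false when @he was merged into an already collapsed entry,
 * in which case @he has been freed, and true when it was inserted.
 */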
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

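/*
 * Two input trees (entries_in_array[0] and [1]) are rotated under the
 * lock, so new samples can keep flowing into one tree while the other
 * one is being collapsed, e.g. by the live perf top display.
 */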
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

/*
 * reverse the map, sort on period.
 */

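/*
 * With event groups the comparison looks at the leader's period first
 * and falls back to the other members' periods as tie-breakers, which
 * is what hist_entry__sort_on_period() implements below on top of the
 * plain period_cmp().
 */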
static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

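/*
 * Sorted insertion into the output tree; while sorting the callchain,
 * branches whose hits fall below min_callchain_hits are left out.
 */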
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

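/*
 * Clear @filter's bit on @h and, once no filter claims the entry
 * anymore, add it back into the totals and column widths.
 */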
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

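/*
 * Insert a zero-stat placeholder for @pair so that hists__link() can
 * represent entries that only exist in the other hists.
 */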
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}