#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

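/*
 * Grow the per-column display widths tracked in hists->col_len so that the
 * symbol, comm, dso, parent, branch and memory columns can hold this entry.
 */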
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{

	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

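/* Age the counts exponentially: keep 7/8 of the period and event count. */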
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

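/*
 * Decay every entry and remove the ones whose period reached zero (or that
 * match the zap flags), unless the entry is still in use by the UI.
 */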
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

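/*
 * Insert 'entry' into hists->entries_in, or merge its period into an
 * existing entry that has the same sort keys.
 */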
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		return NULL;

	hists->nr_entries++;
	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	return he;
}

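/*
 * Illustrative caller flow (a sketch only; 'parent', 'bi', 'mi' and 'sample'
 * are placeholders, not lifted from a specific tool): add one entry per
 * resolved sample, then collapse and resort before rendering:
 *
 *	he = __hists__add_entry(hists, &al, parent, bi, mi,
 *				sample->period, sample->weight,
 *				sample->transaction);
 *	...
 *	hists__collapse_resort(hists, NULL);
 *	hists__output_resort(hists);
 */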
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

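/*
 * Swap the double-buffered input trees: return the tree that was being
 * filled so it can be collapsed while new samples go into the other one.
 */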
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

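/*
 * Merge all input entries that compare equal under the collapse keys into
 * hists->entries_collapsed; entries that were not merged away get the
 * active filters re-applied.
 */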
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
	if (period_a > period_b)
		return 1;
	if (period_a < period_b)
		return -1;
	return 0;
}

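/*
 * Compare two entries by period; when event groups are displayed, break
 * ties using the periods of the other group members.
 */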
static int hist_entry__sort_on_period(struct hist_entry *a,
				      struct hist_entry *b)
{
	int ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *periods_a, *periods_b;

	ret = period_cmp(a->stat.period, b->stat.period);
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	nr_members = evsel->nr_members;
	if (nr_members <= 1)
		return ret;

	periods_a = zalloc(sizeof(*periods_a) * nr_members);
	periods_b = zalloc(sizeof(*periods_b) * nr_members);

	if (!periods_a || !periods_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
	}

	for (i = 1; i < nr_members; i++) {
		ret = period_cmp(periods_a[i], periods_b[i]);
		if (ret)
			break;
	}

out:
	free(periods_a);
	free(periods_b);

	return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort_on_period(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

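/*
 * Rebuild hists->entries sorted by period for display, resetting the
 * totals and column widths as each entry is re-added.
 */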
void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = hists->nr_non_filtered_entries = 0;
	hists->stats.total_period = hists->stats.total_non_filtered_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

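/*
 * Clear one filter bit; if the entry is now completely unfiltered, add its
 * counts back to the hists totals and recalculate the column widths.
 */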
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	++hists->nr_non_filtered_entries;
	if (h->ms.unfolded) {
		hists->nr_entries += h->nr_rows;
		hists->nr_non_filtered_entries += h->nr_rows;
	}
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.total_non_filtered_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;
	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

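/*
 * The hists__filter_entry_by_*() helpers set the corresponding filter bit
 * when an entry does not match the active filter; the hists__filter_by_*()
 * walkers below rebuild the totals from the entries that remain visible.
 */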
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->nr_non_filtered_entries = hists->stats.total_non_filtered_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists->stats.nr_non_filtered_samples = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->nr_non_filtered_entries = hists->stats.total_non_filtered_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists->stats.nr_non_filtered_samples = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->nr_non_filtered_entries = hists->stats.total_non_filtered_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists->stats.nr_non_filtered_samples = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

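/* nr_events[0] accumulates the total across all record types. */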
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

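/*
 * Find an entry in 'hists' matching 'pair' or, failing that, insert a dummy
 * entry with zeroed counts so that hists__link() can pair them up.
 */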
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

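/* Look up the entry in 'hists' that collapses to the same key as 'he'. */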
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}