#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

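/*
 * Grow the column widths recorded in @hists so the symbol, comm/thread,
 * dso, parent and branch from/to columns can hold the strings of @h,
 * falling back to a fixed placeholder width for unresolved addresses.
 */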
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

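/*
 * Recompute the column widths from scratch, walking at most @max_rows
 * entries of the sorted output tree and measuring only the unfiltered ones.
 */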
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->stat.period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->stat.period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->stat.period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->stat.period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period		+= period;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
}

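/*
 * Age an entry: scale its period and event count down by 7/8 so entries
 * that stop receiving samples gradually fade and can eventually be freed.
 */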
static void hist_entry__decay(struct hist_entry *he)
{
	he->stat.period = (he->stat.period * 7) / 8;
	he->stat.nr_events = (he->stat.nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->stat.period;

	return he->stat.period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

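/*
 * Allocate a hist_entry initialized from @template.  When callchains are
 * enabled, the callchain root lives in the extra space allocated right
 * after the entry itself.
 */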
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->stat.period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

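/*
 * Find an entry matching @entry in the current input tree and accumulate
 * @period into it, or allocate and insert a new entry.  Runs under
 * hists->lock because the input trees are rotated by the collapse code.
 */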
static struct hist_entry *add_hist_entry(struct hists *hists,
				      struct hist_entry *entry,
				      struct addr_location *al,
				      u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			he_stat__add_period(&he->stat, period);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.stat = {
			.period	= period,
			.nr_events = 1,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.hists	= self,
	};

	return add_hist_entry(self, &entry, al, period);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he->branch_info);
	free(he);
}

/*
 * collapse the histogram
 */

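/*
 * Insert @he into the collapsed tree @root.  If an entry that collapses to
 * the same key already exists, merge stats and callchains into it, free @he
 * and return false; return true only when @he was actually linked in.
 */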
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

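/*
 * Hand back the current input tree and flip hists->entries_in to the other
 * element of entries_in_array, so new samples keep landing in a fresh tree
 * while the returned one is being collapsed.
 */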
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

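/*
 * Insert @he into the output tree ordered by period, largest first, after
 * sorting its callchain when callchains are in use.
 */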
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				      min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->stat.period > iter->stat.period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

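/*
 * Clear @filter for @h and, if no other filter still hides the entry, add
 * it back into the totals, event counts and column width bookkeeping.
 */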
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->stat.period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

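/*
 * Add an entry matching @pair but with zeroed stats, to serve as the list
 * head when linking entries across hists.  Returns the existing entry if
 * one already collapses to the same key.
 */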
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_nr_entries(hists, he);
	}
out:
	return he;
}

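/*
 * Look up the entry in @hists that collapses to the same key as @he,
 * searching the collapsed tree when the sort keys require collapsing and
 * the input tree otherwise.
 */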
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}