#include <stdio.h>
#include <linux/string.h>

#include "../../util/util.h"
#include "../../util/hist.h"
#include "../../util/sort.h"
#include "../../util/evsel.h"
#include "../../util/srcline.h"
#include "../../util/string2.h"
#include "../../util/thread.h"
#include "../../util/sane_ctype.h"

static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
{
	int i;
	int ret = fprintf(fp, "            ");

	for (i = 0; i < left_margin; i++)
		ret += fprintf(fp, " ");

	return ret;
}

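/*
 * Print the inlined frames for @ip in @map, one "file:line (inline)" or
 * "func (inline)" line per entry, indented to line up with the call-graph
 * pipes described by @depth and @depth_mask.
 */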
static size_t inline__fprintf(struct map *map, u64 ip, int left_margin,
			      int depth, int depth_mask, FILE *fp)
{
	struct dso *dso;
	struct inline_node *node;
	struct inline_list *ilist;
	int ret = 0, i;

	if (map == NULL)
		return 0;

	dso = map->dso;
	if (dso == NULL)
		return 0;

	node = dso__parse_addr_inlines(dso,
				       map__rip_2objdump(map, ip));
	if (node == NULL)
		return 0;

	list_for_each_entry(ilist, &node->val, list) {
		if ((ilist->filename != NULL) || (ilist->funcname != NULL)) {
			ret += callchain__fprintf_left_margin(fp, left_margin);

			for (i = 0; i < depth; i++) {
				if (depth_mask & (1 << i))
					ret += fprintf(fp, "|");
				else
					ret += fprintf(fp, " ");
				ret += fprintf(fp, "          ");
			}

			if (callchain_param.key == CCKEY_ADDRESS ||
			    callchain_param.key == CCKEY_SRCLINE) {
				if (ilist->filename != NULL)
					ret += fprintf(fp, "%s:%d (inline)",
						       ilist->filename,
						       ilist->line_nr);
				else
					ret += fprintf(fp, "??");
			} else if (ilist->funcname != NULL)
				ret += fprintf(fp, "%s (inline)",
					       ilist->funcname);
			else if (ilist->filename != NULL)
				ret += fprintf(fp, "%s:%d (inline)",
					       ilist->filename,
					       ilist->line_nr);
			else
				ret += fprintf(fp, "??");

			ret += fprintf(fp, "\n");
		}
	}

	inline_node__delete(node);
	return ret;
}

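/*
 * Print one separator line of the call graph, drawing a '|' only for the
 * levels still present in @depth_mask.
 */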
static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
					  int left_margin)
{
	int i;
	size_t ret = callchain__fprintf_left_margin(fp, left_margin);

	for (i = 0; i < depth; i++)
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|          ");
		else
			ret += fprintf(fp, "           ");

	ret += fprintf(fp, "\n");

	return ret;
}

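/*
 * Print a single callchain_list entry of the graph: the leading pipes, the
 * "--value--" branch marker when @period is zero (i.e. for the first entry
 * of @node), the symbol name and, if enabled, branch counts and inlined
 * frames.
 */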
static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
				     struct callchain_list *chain,
				     int depth, int depth_mask, int period,
				     u64 total_samples, int left_margin)
{
	int i;
	size_t ret = 0;
	char bf[1024], *alloc_str = NULL;
	char buf[64];
	const char *str;

	ret += callchain__fprintf_left_margin(fp, left_margin);
	for (i = 0; i < depth; i++) {
		if (depth_mask & (1 << i))
			ret += fprintf(fp, "|");
		else
			ret += fprintf(fp, " ");
		if (!period && i == depth - 1) {
			ret += fprintf(fp, "--");
			ret += callchain_node__fprintf_value(node, fp, total_samples);
			ret += fprintf(fp, "--");
		} else
			ret += fprintf(fp, "%s", "          ");
	}

	str = callchain_list__sym_name(chain, bf, sizeof(bf), false);

	if (symbol_conf.show_branchflag_count) {
		if (!period)
			callchain_list_counts__printf_value(node, chain, NULL,
							    buf, sizeof(buf));
		else
			callchain_list_counts__printf_value(NULL, chain, NULL,
							    buf, sizeof(buf));

		if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
			str = "Not enough memory!";
		else
			str = alloc_str;
	}

	fputs(str, fp);
	fputc('\n', fp);
	free(alloc_str);

	if (symbol_conf.inline_name)
		ret += inline__fprintf(chain->ms.map, chain->ip,
				       left_margin, depth, depth_mask, fp);
	return ret;
}

static struct symbol *rem_sq_bracket;
static struct callchain_list rem_hits;

static void init_rem_hits(void)
{
	rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
	if (!rem_sq_bracket) {
		fprintf(stderr, "Not enough memory to display remaining hits\n");
		return;
	}

	strcpy(rem_sq_bracket->name, "[...]");
	rem_hits.ms.sym = rem_sq_bracket;
}

static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of the pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth, except if we have
		 * remaining filtered hits, which then supersede the last
		 * child.
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the old depth mask for the line separator,
		 * to preserve the level link until we reach the last child.
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}

/*
 * If there is a single callchain root, don't bother printing
 * its percentage (100% in fractal mode, and the same percentage as
 * the hist entry in graph mode). This also avoids one column level.
 *
 * However, when a percent limit is applied, it's possible that a single
 * callchain node has a different (non-100% in fractal mode) percentage.
 */
static bool need_percent_display(struct rb_node *node, u64 parent_samples)
{
	struct callchain_node *cnode;

	if (rb_next(node))
		return true;

	cnode = rb_entry(node, struct callchain_node, rb_node);
	return callchain_cumul_hits(cnode) != parent_samples;
}

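/*
 * Graph-mode entry point for one hist entry: print a lone callchain root
 * inline (without its percentage) when possible, then walk the rb tree
 * with __callchain__fprintf_graph().
 */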
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same as
			 * the symbol itself. No need to print it, otherwise it
			 * would show up twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						NULL, chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;

			if (symbol_conf.inline_name)
				ret += inline__fprintf(chain->ms.map,
						       chain->ip,
						       left_margin,
						       0, 0,
						       fp);
		}
		root = &cnode->rb_root;
	}

	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}

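/*
 * Print the chain leading to @node in flat mode, parents first, one
 * indented symbol per line, skipping PERF_CONTEXT_* markers.
 */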
static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
					u64 total_samples)
{
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];

	if (!node)
		return 0;

	ret += __callchain__fprintf_flat(fp, node->parent, total_samples);

	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "                %s\n", callchain_list__sym_name(chain,
					bf, sizeof(bf), false));
	}

	return ret;
}

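/*
 * Flat callchain output: one value line per top-level node followed by its
 * flattened chain, up to callchain_param.print_limit entries.
 */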
static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
				      u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {
		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += fprintf(fp, "           ");
		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, "\n");
		ret += __callchain__fprintf_flat(fp, chain, total_samples);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

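/*
 * Fold the chain leading to @node into a single line, entries separated by
 * symbol_conf.field_sep (';' by default), skipping PERF_CONTEXT_* markers.
 */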
static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
{
	const char *sep = symbol_conf.field_sep ?: ";";
	struct callchain_list *chain;
	size_t ret = 0;
	char bf[1024];
	bool first;

	if (!node)
		return 0;

	ret += __callchain__fprintf_folded(fp, node->parent);

	first = (ret == 0);
	list_for_each_entry(chain, &node->val, list) {
		if (chain->ip >= PERF_CONTEXT_MAX)
			continue;
		ret += fprintf(fp, "%s%s", first ? "" : sep,
			       callchain_list__sym_name(chain,
						bf, sizeof(bf), false));
		first = false;
	}

	return ret;
}

static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
					u64 total_samples)
{
	size_t ret = 0;
	u32 entries_printed = 0;
	struct callchain_node *chain;
	struct rb_node *rb_node = rb_first(tree);

	while (rb_node) {

		chain = rb_entry(rb_node, struct callchain_node, rb_node);

		ret += callchain_node__fprintf_value(chain, fp, total_samples);
		ret += fprintf(fp, " ");
		ret += __callchain__fprintf_folded(fp, chain);
		ret += fprintf(fp, "\n");
		if (++entries_printed == callchain_param.print_limit)
			break;

		rb_node = rb_next(rb_node);
	}

	return ret;
}

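/*
 * Print the callchain of one hist entry according to callchain_param.mode,
 * using the accumulated period as parent samples when callchain
 * accumulation is enabled.
 */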
static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
					    u64 total_samples, int left_margin,
					    FILE *fp)
{
	u64 parent_samples = he->stat.period;

	if (symbol_conf.cumulate_callchain)
		parent_samples = he->stat_acc->period;

	switch (callchain_param.mode) {
	case CHAIN_GRAPH_REL:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_GRAPH_ABS:
		return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
						parent_samples, left_margin);
		break;
	case CHAIN_FLAT:
		return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_FOLDED:
		return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
		break;
	case CHAIN_NONE:
		break;
	default:
		pr_err("Bad callchain mode\n");
	}

	return 0;
}

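/*
 * Format the columns of @hpp_list for @he into hpp->buf; returns the
 * number of bytes written.
 */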
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	return hpp->buf - start;
}

static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
{
	return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
}

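/*
 * Print one entry in hierarchy mode: depth indentation, the overhead
 * columns, the entry's own (left-aligned) sort columns and, for leaf
 * entries, the callchain.
 */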
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
					 struct perf_hpp *hpp,
					 struct hists *hists,
					 FILE *fp)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	char *buf = hpp->buf;
	size_t size = hpp->size;
	int ret, printed = 0;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		/*
		 * If there's no field_sep, we still need
		 * to display initial '  '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: "  ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	if (!sep)
		ret = scnprintf(hpp->buf, hpp->size, "%*s",
				(hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
	advance_hpp(hpp, ret);

	printed += fprintf(fp, "%s", buf);

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		hpp->buf  = buf;
		hpp->size = size;

		/*
		 * No need to call hist_entry__snprintf_alignment() since this
		 * fmt is always the last column in the hierarchy mode.
		 */
		if (perf_hpp__use_color() && fmt->color)
			fmt->color(fmt, hpp, he);
		else
			fmt->entry(fmt, hpp, he);

		/*
		 * dynamic entries are right-aligned but we want left-aligned
		 * in the hierarchy mode
		 */
		printed += fprintf(fp, "%s%s", sep ?: "  ", ltrim(buf));
	}
	printed += putc('\n', fp);

	if (symbol_conf.use_callchain && he->leaf) {
		u64 total = hists__total_period(hists);

		printed += hist_entry_callchain__fprintf(he, total, 0, fp);
		goto out;
	}

out:
	return printed;
}

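/*
 * Print one formatted hist entry line, then its callchain and/or inlined
 * frames when requested; hierarchy mode is delegated to
 * hist_entry__hierarchy_fprintf().
 */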
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
			       char *bf, size_t bfsz, FILE *fp,
			       bool use_callchain)
{
	int ret;
	int callchain_ret = 0;
	int inline_ret = 0;
	struct perf_hpp hpp = {
		.buf		= bf,
		.size		= size,
	};
	struct hists *hists = he->hists;
	u64 total_period = hists->stats.total_period;

	if (size == 0 || size > bfsz)
		size = hpp.size = bfsz;

	if (symbol_conf.report_hierarchy)
		return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);

	hist_entry__snprintf(he, &hpp);

	ret = fprintf(fp, "%s\n", bf);

	if (use_callchain)
		callchain_ret = hist_entry_callchain__fprintf(he, total_period,
							      0, fp);

	if (callchain_ret == 0 && symbol_conf.inline_name) {
		inline_ret = inline__fprintf(he->ms.map, he->ip, 0, 0, 0, fp);
		ret += inline_ret;
		if (inline_ret > 0)
			ret += fprintf(fp, "\n");
	} else
		ret += callchain_ret;

	return ret;
}

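/*
 * Print (indent - 2) columns of @line as hierarchy filler; a no-op when a
 * field separator is in use or there is nothing to indent.
 */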
static int print_hierarchy_indent(const char *sep, int indent,
				  const char *line, FILE *fp)
{
	if (sep != NULL || indent < 2)
		return 0;

	return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}

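/*
 * Print the hierarchy-mode header block: the overhead column headers, the
 * sort keys joined with " / ", and a matching line of dots underneath.
 * Returns the number of header lines printed.
 */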
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: "  ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++;  /* for the '+' sign between column headers */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: "  ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}

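/*
 * Print one line of column headers; columns that take part in a
 * multi-column span suppress both their separator and their own buffer.
 */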
static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
			 int line, FILE *fp)
{
	struct perf_hpp_fmt *fmt;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int span = 0;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first && !span)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		fmt->header(fmt, hpp, hists, line, &span);

		if (!span)
			fprintf(fp, "%s", hpp->buf);
	}
}

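/*
 * Print the standard (non-hierarchy) header block: the configured header
 * lines plus, without a field separator, a line of dots underlining each
 * column. Returns the number of lines printed.
 */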
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: "  ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");
	return hpp_list->nr_header_lines + 2;
}

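/* Print the header block, dispatching on symbol_conf.report_hierarchy. */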
int hists__fprintf_headers(struct hists *hists, FILE *fp)
{
	char bf[1024];
	struct perf_hpp dummy_hpp = {
		.buf	= bf,
		.size	= sizeof(bf),
	};

	fprintf(fp, "# ");

	if (symbol_conf.report_hierarchy)
		return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
	else
		return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);

}

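/*
 * Print all entries of @hists that survive filtering and @min_pcnt,
 * preceded by the headers when @show_header is set and limited to
 * @max_rows output rows.
 */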
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool use_callchain)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, use_callchain);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		if (h->ms.map == NULL && verbose > 1) {
			__map_groups__fprintf_maps(h->thread->mg,
						   MAP__FUNCTION, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	zfree(&rem_sq_bracket);

	return ret;
}

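/*
 * Print a "<name> events: <count>" line for every PERF_RECORD_* type with
 * a non-zero count and a known name.
 */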
size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
{
	int i;
	size_t ret = 0;

	for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
		const char *name;

		if (stats->nr_events[i] == 0)
			continue;

		name = perf_event__name(i);
		if (!strcmp(name, "UNKNOWN"))
			continue;

		ret += fprintf(fp, "%16s events: %10d\n", name,
			       stats->nr_events[i]);
	}

	return ret;
}