/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/config.h"
#include "util/color.h"
#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"

#include "util/debug.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <signal.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>

#include "sane_ctype.h"

79
static volatile int done;
80
static volatile int resize;
81

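/*
 * The stdio UI keeps HEADER_LINE_NR rows at the top of the terminal for
 * the summary header; perf_top__update_print_entries() gives whatever
 * is left to the symbol entries.
 */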
#define HEADER_LINE_NR  5

84
static void perf_top__update_print_entries(struct perf_top *top)
85
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
87 88
}

89
static void winch_sig(int sig __maybe_unused)
90
{
91 92
	resize = 1;
}
93

94 95
static void perf_top__resize(struct perf_top *top)
{
96 97
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
98 99
}

100
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
101 102
{
	struct symbol *sym;
103
	struct annotation *notes;
104
	struct map *map;
105
	int err = -1;
106

107
	if (!he || !he->ms.sym)
108 109
		return -1;

110 111
	sym = he->ms.sym;
	map = he->ms.map;
112 113 114 115

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
116 117
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
118 119 120
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
121
		return -1;
122 123
	}

124 125 126
	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
127 128 129
		goto out_assign;
	}

130
	pthread_mutex_lock(&notes->lock);
131

132
	if (symbol__alloc_hist(sym) < 0) {
133
		pthread_mutex_unlock(&notes->lock);
134 135
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
136
		sleep(1);
137
		return err;
138
	}
139

140
	err = symbol__disassemble(sym, map, NULL, 0, NULL, NULL);
141
	if (err == 0) {
142
out_assign:
143
		top->sym_filter_entry = he;
144 145 146 147
	} else {
		char msg[BUFSIZ];
		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
148
	}
149

150
	pthread_mutex_unlock(&notes->lock);
151
	return err;
152 153
}

154
static void __zero_source_counters(struct hist_entry *he)
155
{
156
	struct symbol *sym = he->ms.sym;
157
	symbol__annotate_zero_histograms(sym);
158 159
}

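/*
 * Emitted at most once per map (see erange_warned below) when a sample
 * IP lands outside the map/symbol boundaries, i.e. when
 * hist_entry__inc_addr_samples() returns -ERANGE.
 */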
160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr:   %" PRIx64 "\n"
		    "DSO:    %s %c\n"
		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch:   %s\n"
		    "Kernel: %s\n"
		    "Tools:  %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);
183

184 185 186
	map->erange_warned = true;
}

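/*
 * Account one sample at 'ip' to the annotation histogram of the entry's
 * symbol.  The annotation lock is only trylock'ed so this path never
 * blocks behind the display thread; contended samples are simply not
 * added to the annotation view.
 */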
187 188
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
189
					struct perf_sample *sample,
190
					int counter, u64 ip)
191
{
192
	struct annotation *notes;
193
	struct symbol *sym = he->ms.sym;
194
	int err = 0;
195

196 197 198
	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
199 200
		return;

201 202 203
	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
204 205
		return;

206
	err = hist_entry__inc_addr_samples(he, sample, counter, ip);
207

208
	pthread_mutex_unlock(&notes->lock);
209

210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225
	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		pthread_mutex_lock(&he->hists->lock);
226
	}
227 228
}

229
static void perf_top__show_details(struct perf_top *top)
230
{
231
	struct hist_entry *he = top->sym_filter_entry;
232
	struct annotation *notes;
233
	struct symbol *symbol;
234
	int more;
235

236
	if (!he)
237 238
		return;

239
	symbol = he->ms.sym;
240 241 242 243 244 245
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;
246

247
	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
248
	printf("  Events  Pcnt (>=%d%%)\n", top->sym_pcnt_filter);
249

250
	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
251
				       0, top->sym_pcnt_filter, top->print_entries, 4);
252 253 254 255 256 257 258

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
259
	if (more != 0)
260
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
261 262
out_unlock:
	pthread_mutex_unlock(&notes->lock);
263
}
264

265
static void perf_top__print_sym_table(struct perf_top *top)
266
{
267 268
	char bf[160];
	int printed = 0;
269
	const int win_width = top->winsize.ws_col - 1;
270 271
	struct perf_evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);
272

273
	puts(CONSOLE_CLEAR);
274

275
	perf_top__header_snprintf(top, bf, sizeof(bf));
276
	printf("%s\n", bf);
277

278
	perf_top__reset_sample_counters(top);
279

280
	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
281

282 283 284 285
	if (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST]) {
		hists->stats.nr_lost_warned =
			      hists->stats.nr_events[PERF_RECORD_LOST];
286 287
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
288
			      hists->stats.nr_lost_warned);
289
		++printed;
290 291
	}

292 293
	if (top->sym_filter_entry) {
		perf_top__show_details(top);
294 295 296
		return;
	}

297 298 299 300 301 302 303
	if (top->evlist->enabled) {
		if (top->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, top->hide_user_symbols,
					     top->hide_kernel_symbols);
		}
	}

306
	hists__collapse_resort(hists, NULL);
307
	perf_evsel__output_resort(evsel, NULL);

309
	hists__output_recalc_col_len(hists, top->print_entries - printed);
310
	putchar('\n');
311
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
312
		       top->min_percent, stdout, symbol_conf.use_callchain);
313 314
}

315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while(*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

350
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
351 352
{
	char *buf = malloc(0), *p;
353
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
354
	struct hists *hists = evsel__hists(top->sym_evsel);
355
	struct rb_node *next;
356 357 358 359 360
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
361
		top->sym_filter_entry = NULL;
362 363 364 365 366 367 368 369 370 371
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

372
	next = rb_first(&hists->entries);
373 374 375 376
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
377 378
			break;
		}
379
		next = rb_next(&n->rb_node);
380 381 382
	}

	if (!found) {
383
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
384 385
		sleep(1);
	} else
386
		perf_top__parse_source(top, found);
387 388 389 390 391

out_free:
	free(buf);
}

392
static void perf_top__print_mapped_keys(struct perf_top *top)
393
{
394 395
	char *name = NULL;

396 397
	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
398 399 400 401
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
402 403
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);
404

405
	if (top->evlist->nr_entries > 1)
406
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));
407

408
	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);
409

410
	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
411 412
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");
413

414
	fprintf(stdout,
		"\t[K]     hide kernel symbols.             \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
417 418
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
419 420
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
421 422 423
	fprintf(stdout, "\t[qQ]    quit.\n");
}

424
static int perf_top__key_mapped(struct perf_top *top, int c)
425 426 427 428 429 430 431 432
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
433 434
		case 'K':
		case 'U':
435 436 437
		case 'F':
		case 's':
		case 'S':
438 439
			return 1;
		case 'E':
440
			return top->evlist->nr_entries > 1 ? 1 : 0;
441 442
		default:
			break;
443 444 445
	}

	return 0;
446 447
}

448
static bool perf_top__handle_keypress(struct perf_top *top, int c)
449
{
450 451
	bool ret = true;

452
	if (!perf_top__key_mapped(top, c)) {
453
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
454
		struct termios save;
455

456
		perf_top__print_mapped_keys(top);
457 458 459
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

460
		set_term_quiet_input(&save);
461 462 463 464 465

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
466
		if (!perf_top__key_mapped(top, c))
467
			return ret;
468 469
	}

470 471
	switch (c) {
		case 'd':
472 473 474
			prompt_integer(&top->delay_secs, "Enter display delay");
			if (top->delay_secs < 1)
				top->delay_secs = 1;
475 476
			break;
		case 'e':
477 478
			prompt_integer(&top->print_entries, "Enter display entries (lines)");
			if (top->print_entries == 0) {
479
				perf_top__resize(top);
480
				signal(SIGWINCH, winch_sig);
481
			} else {
482
				signal(SIGWINCH, SIG_DFL);
483
			}
484 485
			break;
		case 'E':
486
			if (top->evlist->nr_entries > 1) {
487 488 489
				/* Select 0 as the default event: */
				int counter = 0;

490
				fprintf(stderr, "\nAvailable events:");
491

492
				evlist__for_each_entry(top->evlist, top->sym_evsel)
493
					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));
494

495
				prompt_integer(&counter, "Enter details event counter");
496

497
				if (counter >= top->evlist->nr_entries) {
498
					top->sym_evsel = perf_evlist__first(top->evlist);
499
					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
500
					sleep(1);
501
					break;
502
				}
503
				evlist__for_each_entry(top->evlist, top->sym_evsel)
504
					if (top->sym_evsel->idx == counter)
505
						break;
506
			} else
507
				top->sym_evsel = perf_evlist__first(top->evlist);
508 509
			break;
		case 'f':
510
			prompt_integer(&top->count_filter, "Enter display event count filter");
511 512
			break;
		case 'F':
513 514
			prompt_percent(&top->sym_pcnt_filter,
				       "Enter details display event filter (percent)");
515
			break;
516
		case 'K':
517
			top->hide_kernel_symbols = !top->hide_kernel_symbols;
518
			break;
519 520 521
		case 'q':
		case 'Q':
			printf("exiting.\n");
522 523
			if (top->dump_symtab)
				perf_session__fprintf_dsos(top->session, stderr);
524 525
			ret = false;
			break;
526
		case 's':
527
			perf_top__prompt_symbol(top, "Enter details symbol");
528 529
			break;
		case 'S':
530
			if (!top->sym_filter_entry)
531 532
				break;
			else {
533
				struct hist_entry *syme = top->sym_filter_entry;
534

535
				top->sym_filter_entry = NULL;
536 537 538
				__zero_source_counters(syme);
			}
			break;
539
		case 'U':
540
			top->hide_user_symbols = !top->hide_user_symbols;
541
			break;
542
		case 'z':
543
			top->zero = !top->zero;
544
			break;
545 546
		default:
			break;
547
	}
548 549

	return ret;
550 551
}

552 553 554
static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
555
	struct perf_evsel *evsel = t->sym_evsel;
556 557
	struct hists *hists;

558 559 560 561 562
	perf_top__reset_sample_counters(t);

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

563
	hists = evsel__hists(evsel);
564

565 566 567 568 569 570 571
	if (t->evlist->enabled) {
		if (t->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, t->hide_user_symbols,
					     t->hide_kernel_symbols);
		}
	}

574
	hists__collapse_resort(hists, NULL);
575
	perf_evsel__output_resort(evsel, NULL);
576 577
}

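/*
 * Display side: display_thread_tui() drives the hists TUI browser,
 * while display_thread() repaints the stdio symbol table every
 * delay_secs and services the key mappings handled above.
 */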
578
static void *display_thread_tui(void *arg)
579
{
580
	struct perf_evsel *pos;
581
	struct perf_top *top = arg;
582
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
583 584 585 586 587
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};
588

589 590 591 592 593 594 595
	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

596
	perf_top__sort_new_samples(top);
597 598 599 600 601 602

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * zooming in/out on UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
603
	evlist__for_each_entry(top->evlist, pos) {
604 605 606
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}
607

608 609 610
	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      top->min_percent,
				      &top->session->header.env);
611

612
	done = 1;
613 614 615
	return NULL;
}

616 617 618 619 620 621 622
static void display_sig(int sig __maybe_unused)
{
	done = 1;
}

static void display_setup_sig(void)
{
623 624
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
625 626 627 628 629
	signal(SIGINT,  display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}

630
static void *display_thread(void *arg)
631
{
632
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
633
	struct termios save;
634
	struct perf_top *top = arg;
635 636
	int delay_msecs, c;

637 638 639 640 641 642 643
	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

644
	display_setup_sig();
645
	pthread__unblock_sigwinch();
646
repeat:
647
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
648
	set_term_quiet_input(&save);
649 650
	/* trash return */
	getc(stdin);
651

652
	while (!done) {
653
		perf_top__print_sym_table(top);
654 655 656 657 658 659 660 661 662 663
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
664
			__fallthrough;
665
		default:
666 667 668 669 670 671
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			done = 1;
672 673
		}
	}
674

675
	tcsetattr(0, TCSAFLUSH, &save);
676 677 678
	return NULL;
}

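/*
 * Callback run by hist_entry_iter__add() for each entry it creates:
 * records the precise IP for annotation (only when 'single' is set and
 * the "sym" sort key is in use) and accounts branch cycles.
 */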
679 680 681 682 683 684 685 686
static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;

687
	if (perf_hpp_list.sym && single)
688
		perf_top__record_precise_ip(top, he, iter->sample, evsel->idx, al->addr);
689

690 691
	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
		     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
692 693 694
	return 0;
}

695 696
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
697
				       struct perf_evsel *evsel,
698
				       struct perf_sample *sample,
699
				       struct machine *machine)
700
{
701
	struct perf_top *top = container_of(tool, struct perf_top, tool);
702
	struct addr_location al;
703
	int err;
704

705
	if (!machine && perf_guest) {
706 707 708
		static struct intlist *seen;

		if (!seen)
709
			seen = intlist__new(NULL);
710

711
		if (!intlist__has_entry(seen, sample->pid)) {
712
			pr_err("Can't find guest [%d]'s kernel information\n",
713 714
				sample->pid);
			intlist__add(seen, sample->pid);
715
		}
716 717 718
		return;
	}

719
	if (!machine) {
720
		pr_err("%u unprocessable samples recorded.\r",
721
		       top->session->evlist->stats.nr_unprocessable_samples++);
722 723 724
		return;
	}

725
	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
726
		top->exact_samples++;
727

728
	if (machine__resolve(machine, &al, sample) < 0)
729
		return;
730

731
	if (!machine->kptr_restrict_warned &&
732 733
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
734 735
		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
736 737 738
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
739
			  al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
740
			  " modules" : "");
741 742 743
			if (use_browser <= 0)
				sleep(5);
		}
744
		machine->kptr_restrict_warned = true;
745 746
	}

747
	if (al.sym == NULL) {
748
		const char *msg = "Kernel samples will not be resolved.\n";
749 750 751 752 753 754 755 756 757 758 759
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
760
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
761
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
762
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
763
			if (symbol_conf.vmlinux_name) {
764 765 766 767
				char serr[256];
				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
768 769 770 771 772 773 774
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
775
			top->vmlinux_warned = true;
776
		}
777 778
	}

779
	if (al.sym == NULL || !al.sym->idle) {
780
		struct hists *hists = evsel__hists(evsel);
781
		struct hist_entry_iter iter = {
782 783 784
			.evsel		= evsel,
			.sample 	= sample,
			.add_entry_cb 	= hist_iter__top_callback,
785
		};
786

787 788 789 790
		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;
791

792
		pthread_mutex_lock(&hists->lock);
793

794
		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
795 796
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");
797

798
		pthread_mutex_unlock(&hists->lock);
799
	}
800

801
	addr_location__put(&al);
802 803
}

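/*
 * Drain one mmap ring: pick the host or guest machine from each
 * sample's cpumode, then feed PERF_RECORD_SAMPLE events to
 * perf_event__process_sample() and hand the other event types to
 * machine__process_event().
 */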
804
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
805
{
806
	struct perf_sample sample;
807
	struct perf_evsel *evsel;
808
	struct perf_session *session = top->session;
809
	union perf_event *event;
810
	struct machine *machine;
811
	int ret;
812

813
	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
814
		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
815 816
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
817
			goto next_event;
818
		}
819

820
		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
821 822
		assert(evsel != NULL);

823
		if (event->header.type == PERF_RECORD_SAMPLE)
824
			++top->samples;
825

826
		switch (sample.cpumode) {
827
		case PERF_RECORD_MISC_USER:
828 829
			++top->us_samples;
			if (top->hide_user_symbols)
830
				goto next_event;
831
			machine = &session->machines.host;
832 833
			break;
		case PERF_RECORD_MISC_KERNEL:
834 835
			++top->kernel_samples;
			if (top->hide_kernel_symbols)
836
				goto next_event;
837
			machine = &session->machines.host;
838 839
			break;
		case PERF_RECORD_MISC_GUEST_KERNEL:
840
			++top->guest_kernel_samples;
841 842
			machine = perf_session__find_machine(session,
							     sample.pid);
843 844
			break;
		case PERF_RECORD_MISC_GUEST_USER:
845
			++top->guest_us_samples;
846 847 848 849
			/*
			 * TODO: we don't process guest user from host side
			 * except simple counting.
			 */
850
			goto next_event;
851 852 853 854 855
		default:
			if (event->header.type == PERF_RECORD_SAMPLE)
				goto next_event;
			machine = &session->machines.host;
			break;
856 857 858
		}


859 860 861 862
		if (event->header.type == PERF_RECORD_SAMPLE) {
			perf_event__process_sample(&top->tool, event, evsel,
						   &sample, machine);
		} else if (event->header.type < PERF_RECORD_MAX) {
863
			hists__inc_nr_events(evsel__hists(evsel), event->header.type);
864
			machine__process_event(machine, event, &sample);
865
		} else
866
			++session->evlist->stats.nr_unknown_events;
867 868
next_event:
		perf_evlist__mmap_consume(top->evlist, idx);
869 870 871
	}
}

872
static void perf_top__mmap_read(struct perf_top *top)
873
{
874 875
	int i;

876 877
	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
878 879
}

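/*
 * Open every counter in the evlist, retrying with the fallback event
 * chosen by perf_evsel__fallback() when the first open fails, then
 * mmap the ring buffers.
 */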
880
static int perf_top__start_counters(struct perf_top *top)
881
{
882
	char msg[BUFSIZ];
883
	struct perf_evsel *counter;
884
	struct perf_evlist *evlist = top->evlist;
885
	struct record_opts *opts = &top->record_opts;
886

887
	perf_evlist__config(evlist, opts, &callchain_param);
888

889
	evlist__for_each_entry(evlist, counter) {
890
try_again:
891
		if (perf_evsel__open(counter, top->evlist->cpus,
892
				     top->evlist->threads) < 0) {
893
			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
894
				if (verbose > 0)
895
					ui__warning("%s\n", msg);
896 897
				goto try_again;
			}
898

899 900 901
			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
902
			goto out_err;
903
		}
904
	}
905

906
	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
907
		ui__error("Failed to mmap with %d (%s)\n",
908
			    errno, str_error_r(errno, msg, sizeof(msg)));
909 910 911
		goto out_err;
	}

912
	return 0;
913 914

out_err:
915
	return -1;
916 917
}

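/*
 * Callchains require the "sym" sort key: -g is rejected without it,
 * and any other callchain mode has its parameters registered before
 * use.
 */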
918
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
919
{
920
	if (!perf_hpp_list.sym) {
921
		if (callchain->enabled) {
922
			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
923 924
			return -EINVAL;
		}
925 926
	} else if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
927
			ui__error("Can't register callchain params.\n");
928 929 930 931 932 933 934
			return -EINVAL;
		}
	}

	return 0;
}

935
static int __cmd_top(struct perf_top *top)
936
{
937 938 939 940
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evsel_config_term *err_term;
	struct perf_evlist *evlist = top->evlist;
941
	struct record_opts *opts = &top->record_opts;
942
	pthread_t thread;
943
	int ret;
944

945
	top->session = perf_session__new(NULL, false, NULL);
946
	if (top->session == NULL)
947
		return -1;
948

949
	if (!objdump_path) {
950
		ret = perf_env__lookup_objdump(&top->session->header.env);
951 952 953 954
		if (ret)
			goto out_delete;
	}

955
	ret = callchain_param__setup_sample_type(&callchain_param);
956 957 958
	if (ret)
		goto out_delete;

959
	if (perf_session__register_idle_thread(top->session) < 0)
		goto out_delete;

962 963
	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();
964

965
	machine__synthesize_threads(&top->session->machines.host, &opts->target,
966 967
				    top->evlist->threads, false,
				    opts->proc_map_timeout,
968
				    top->nr_threads_synthesize);
969

970 971
	if (top->nr_threads_synthesize > 1)
		perf_set_singlethreaded();
972

973
	if (perf_hpp_list.socket) {
974 975 976 977 978
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0)
			goto out_err_cpu_topo;
	}

979 980 981 982
	ret = perf_top__start_counters(top);
	if (ret)
		goto out_delete;

983 984
	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
	if (ret) {
985
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
986 987 988 989 990
			err_term->val.drv_cfg, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		goto out_delete;
	}

991
	top->session->evlist = top->evlist;
992
	perf_session__set_id_hdr_size(top->session);
993

994 995 996 997 998 999 1000 1001
	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
1002
	if (!target__none(&opts->target))
		perf_evlist__enable(top->evlist);

1005
	/* Wait for a minimal set of events before starting the snapshot */
1006
	perf_evlist__poll(top->evlist, 100);
1007

1008
	perf_top__mmap_read(top);
1009

1010
	ret = -1;
1011
	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
1012
							    display_thread), top)) {
1013
		ui__error("Could not create display thread.\n");
1014
		goto out_delete;
1015 1016
	}

1017
	if (top->realtime_prio) {
1018 1019
		struct sched_param param;

1020
		param.sched_priority = top->realtime_prio;
1021
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
1022
			ui__error("Could not set realtime priority.\n");
1023
			goto out_join;
1024 1025 1026
		}
	}

1027
	while (!done) {
1028
		u64 hits = top->samples;
1029

1030
		perf_top__mmap_read(top);
1031

1032
		if (hits == top->samples)
1033
			ret = perf_evlist__poll(top->evlist, 100);
1034 1035 1036 1037 1038

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
1039 1040
	}

1041
	ret = 0;
1042 1043
out_join:
	pthread_join(thread, NULL);
1044
out_delete:
1045 1046
	perf_session__delete(top->session);
	top->session = NULL;
1047

1048
	return ret;
1049 1050 1051

out_err_cpu_topo: {
	char errbuf[BUFSIZ];
1052
	const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
1053 1054 1055 1056

	ui__error("Could not read the CPU topology map: %s\n", err);
	goto out_delete;
}
1057 1058 1059
}

static int
callchain_opt(const struct option *opt, const char *arg, int unset)
1061 1062
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}
1065

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
1069
	struct callchain_param *callchain = opt->value;
1070

1071 1072
	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;
1073 1074 1075 1076 1077 1078

	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
1079
		callchain->record_mode = CALLCHAIN_NONE;
1080 1081 1082 1083
		return 0;
	}

	return parse_callchain_top_opt(arg);
1084
}
1085

1086
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
1087 1088
{
	if (!strcmp(var, "top.call-graph"))
1089
		var = "call-graph.record-mode"; /* fall-through */
1090 1091 1092 1093
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}
1094

1095
	return 0;
1096 1097
}

1098 1099 1100 1101 1102 1103 1104 1105 1106 1107
static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;

	top->min_percent = strtof(arg, NULL);
	return 0;
}

1108 1109
const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
1110

1111
int cmd_top(int argc, const char **argv)
1112
{
1113
	char errbuf[BUFSIZ];
1114 1115 1116
	struct perf_top top = {
		.count_filter	     = 5,
		.delay_secs	     = 2,
1117 1118 1119 1120 1121
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
1122
			.target		= {
1123 1124
				.uses_mmap   = true,
			},
1125
			.proc_map_timeout    = 500,
		},
1127
		.max_stack	     = sysctl_perf_event_max_stack,
1128
		.sym_pcnt_filter     = 5,
1129
		.nr_threads_synthesize = UINT_MAX,
1130
	};
1131
	struct record_opts *opts = &top.record_opts;
1132
	struct target *target = &opts->target;
1133
	const struct option options[] = {
1134
	OPT_CALLBACK('e', "event", &top.evlist, "event",
1135
		     "event selector. use 'perf list' to list available events",
1136
		     parse_events_option),
1137 1138
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
1139
		    "profile events on existing process id"),
1140
	OPT_STRING('t', "tid", &target->tid, "tid",
1141
		    "profile events on existing thread id"),
1142
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
1143
			    "system-wide collection from all CPUs"),
1144
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
1145
		    "list of cpus to monitor"),
1146 1147
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
1148 1149
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
1150
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1151
		    "hide kernel symbols"),
1152 1153 1154
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
1155
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
1156
		    "collect data with this RT SCHED_FIFO priority"),
1157
	OPT_INTEGER('d', "delay", &top.delay_secs,
1158
		    "number of seconds to delay between refreshes"),
1159
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
1160
			    "dump the symbol table used for profiling"),
1161
	OPT_INTEGER('f', "count-filter", &top.count_filter,
1162
		    "only display functions with more events than this"),
1163
	OPT_BOOLEAN(0, "group", &opts->group,
1164
			    "put the counters into a counter group"),
1165 1166
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
1167
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1168
		    "symbol to annotate"),
1169 1170
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
1171
	OPT_INTEGER('E', "entries", &top.print_entries,
1172
		    "display this many functions"),
1173
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
1174
		    "hide user symbols"),
1175 1176
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
1177
	OPT_INCR('v', "verbose", &verbose,
1178
		    "be more verbose (show counter open errors, etc)"),
1179
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1180 1181
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer the man page for the complete list."),
1182 1183
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
1184 1185
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
1186
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1187
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
1189
	OPT_CALLBACK(0, "call-graph", &callchain_param,
1190
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
1191
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
1194 1195
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
1196
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1197 1198 1199
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		   "ignore callees of these functions in call graphs",
		   report_parse_ignore_callees_opt),
1200 1201 1202 1203 1204 1205 1206 1207
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
1208 1209 1210 1211
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
1212 1213
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
1214 1215
	OPT_STRING(0, "objdump", &objdump_path, "path",
		    "objdump binary to use for disassembly and annotations"),
1216 1217
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
1218
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1219 1220
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
1223 1224 1225
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
1226 1227
	OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
1228 1229 1230 1231 1232 1233
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
1234 1235
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
1238
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
1239 1240
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
			"number of thread to run event synthesize"),
1241
	OPT_END()
1242
	};
1243 1244 1245 1246
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
1247 1248 1249 1250
	int status = hists__init();

	if (status < 0)
		return status;
1251

1252
	top.evlist = perf_evlist__new();
1253
	if (top.evlist == NULL)
1254 1255
		return -ENOMEM;

1256 1257 1258
	status = perf_config(perf_top_config, &top);
	if (status)
		return status;
1259

1260 1261 1262 1263
	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

1264 1265 1266 1267 1268 1269
	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_delete_evlist;
	}

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

1283
	sort__mode = SORT_MODE__TOP;
1284
	/* display thread wants entries to be collapsed in a different tree */
1285
	perf_hpp_list.need_collapse = 1;
1286

1287 1288 1289 1290 1291 1292 1293
	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

1294
	if (setup_sorting(top.evlist) < 0) {
1295 1296 1297 1298 1299
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
1300 1301
		goto out_delete_evlist;
	}
1302

1303
	status = target__validate(target);
1304
	if (status) {
1305
		target__strerror(target, status, errbuf, BUFSIZ);
1306
		ui__warning("%s\n", errbuf);
1307 1308
	}

1309
	status = target__parse_uid(target);
1310 1311
	if (status) {
		int saved_errno = errno;
1312

1313
		target__strerror(target, status, errbuf, BUFSIZ);
1314
		ui__error("%s\n", errbuf);
1315 1316

		status = -saved_errno;
1317
		goto out_delete_evlist;
1318
	}
1319

1320
	if (target__none(target))
1321
		target->system_wide = true;
1322

1323 1324
	if (perf_evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
1325
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
1326
		goto out_delete_evlist;
1327
	}
1328

1329 1330
	symbol_conf.nr_events = top.evlist->nr_entries;

1331 1332
	if (top.delay_secs < 1)
		top.delay_secs = 1;
1333

1334
	if (record_opts__config(opts)) {
1335
		status = -EINVAL;
1336
		goto out_delete_evlist;
1337 1338
	}

1339
	top.sym_evsel = perf_evlist__first(top.evlist);
1340

1341
	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

1346 1347 1348
	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

1349 1350 1351
	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;
1352 1353

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1354
	if (symbol__init(NULL) < 0)
1355 1356
		return -1;

1357
	sort__setup_elide(stdout);
1358

1359
	get_term_dimensions(&top.winsize);
1360
	if (top.print_entries == 0) {
1361
		perf_top__update_print_entries(&top);
1362
		signal(SIGWINCH, winch_sig);
1363 1364
	}

1365
	status = __cmd_top(&top);
1366

1367
out_delete_evlist:
1368
	perf_evlist__delete(top.evlist);
1369 1370

	return status;
1371
}