/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
20
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/config.h"
#include "util/color.h"
#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"

#include "util/debug.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>
#include <signal.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>

#include "sane_ctype.h"

79
static volatile int done;
80
static volatile int resize;
81

N
Namhyung Kim 已提交
82 83
#define HEADER_LINE_NR  5

84
/*
 * Recompute how many histogram entries fit on screen: terminal rows
 * minus the HEADER_LINE_NR lines used by the header block.
 */
static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

89
/*
 * SIGWINCH handler: only sets a flag; the actual resize is performed
 * later from the display loop (async-signal-safe).
 */
static void perf_top__sig_winch(int sig __maybe_unused,
				siginfo_t *info __maybe_unused, void *arg __maybe_unused)
{
	resize = 1;
}
94

95 96
/*
 * Re-read the terminal dimensions and adjust the number of printable
 * entries accordingly. Called from the display loop when 'resize' is set.
 */
static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}

101
/*
 * Prepare annotation (disassembly + per-address sample histogram) for the
 * symbol behind @he, and select it as the current sym_filter_entry.
 *
 * Returns 0 on success, negative on error (no symbol, kallsyms-only DSO,
 * OOM, or disassembly failure). On all paths that touch notes, the
 * annotation lock is held while top->sym_filter_entry is assigned.
 */
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	/*
	 * Already annotated: just take the lock and assign; the goto jumps
	 * into the success branch of the disassembly below.
	 */
	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__disassemble(sym, map, NULL, 0, NULL, NULL);
	if (err == 0) {
out_assign:
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];
		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

155
/*
 * Reset the per-address annotation histograms of @he's symbol, used when
 * the symbol stops being the annotation target or its counters are zeroed.
 */
static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;
	symbol__annotate_zero_histograms(sym);
}

161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183
/*
 * One-shot warning (per map, via map->erange_warned) when a sample address
 * falls outside the symbol/map bounds: dump enough context (address, DSO,
 * map and symbol ranges, arch/kernel/tools versions) for a bug report.
 */
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr:   %" PRIx64 "\n"
		    "DSO:    %s %c\n"
		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch:   %s\n"
		    "Kernel: %s\n"
		    "Tools:  %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	/* In stdio mode give the user time to read before the screen clears */
	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}

188 189
/*
 * Account one sample at instruction granularity into the symbol's
 * annotation histogram. Skipped in stdio mode unless the symbol is the
 * currently annotated one. Uses trylock on the annotation lock so the
 * sampling path never blocks on the display thread.
 */
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	err = hist_entry__inc_addr_samples(he, sample, counter, ip);

	pthread_mutex_unlock(&notes->lock);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		/* -ERANGE: address outside the map; warn once per map */
		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		/* re-acquire before returning to the caller, which unlocks */
		pthread_mutex_lock(&he->hists->lock);
	}
}

230
/*
 * Print the annotated disassembly of the currently selected symbol
 * (top->sym_filter_entry), filtered by sym_pcnt_filter, then zero or
 * decay its histogram depending on the 'z' toggle. No-op when no symbol
 * is selected or it has not been annotated yet.
 */
static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	/* not annotated yet (perf_top__parse_source() hasn't run/succeeded) */
	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", top->sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
				       0, top->sym_pcnt_filter, top->print_entries, 4);

	/* only age the histogram while the events are counting */
	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}
265

266
/*
 * stdio-mode refresh: clear the screen, print the header, warn about lost
 * chunks, then either show per-symbol annotation details (when a symbol is
 * selected) or resort and print the histogram table.
 */
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct perf_evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	/* warn once per change in the PERF_RECORD_LOST count */
	if (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST]) {
		hists->stats.nr_lost_warned =
			      hists->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      hists->stats.nr_lost_warned);
		++printed;
	}

	/* symbol annotation mode replaces the table entirely */
	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	if (top->evlist->enabled) {
		if (top->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, top->hide_user_symbols,
					     top->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, symbol_conf.use_callchain);
}

316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350
/*
 * Prompt the user with @msg and read a line from stdin; if the line is a
 * non-negative decimal integer, store it in *target, otherwise leave
 * *target untouched.
 *
 * Fixes vs. the previous version:
 *  - buf starts as NULL (getline(3)'s documented initial state) instead
 *    of malloc(0);
 *  - a failed getline() no longer leaks the buffer getline may have
 *    allocated before failing (the early "return" skipped free(buf)).
 */
static void prompt_integer(int *target, const char *msg)
{
	char *buf = NULL, *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;	/* getline may still have allocated buf */

	/* strip the trailing newline, if any */
	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	/* reject anything that is not all digits */
	for (p = buf; *p; p++) {
		if (!isdigit(*p))
			goto out_free;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

/*
 * Prompt for an integer and store it in *target only when it is a valid
 * percentage (0..100 inclusive); otherwise *target is left untouched.
 */
static void prompt_percent(int *target, const char *msg)
{
	int val = 0;

	prompt_integer(&val, msg);
	if (val < 0 || val > 100)
		return;
	*target = val;
}

351
/*
 * Interactively ask for a symbol name, look it up among the current
 * histogram entries and, if found, make it the annotation target via
 * perf_top__parse_source(). Any previously selected symbol has its
 * counters zeroed and is deselected first.
 */
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	/* strip the trailing newline, if any */
	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	/* linear search of the rbtree for an exact symbol-name match */
	next = rb_first(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

393
/*
 * Print the interactive help: every mapped key with its current value.
 * The [E] entry is only shown when more than one event is being counted.
 */
static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);

	if (top->evlist->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	fprintf(stdout,
		"\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

425
static int perf_top__key_mapped(struct perf_top *top, int c)
426 427 428 429 430 431 432 433
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
434 435
		case 'K':
		case 'U':
436 437 438
		case 'F':
		case 's':
		case 'S':
439 440
			return 1;
		case 'E':
441
			return top->evlist->nr_entries > 1 ? 1 : 0;
442 443
		default:
			break;
444 445 446
	}

	return 0;
447 448
}

449
/*
 * Handle one interactive key press. Returns true to keep running, false
 * when the user quit ('q'/'Q'). An unmapped key first shows the key help
 * and reads one more key (with the terminal temporarily in quiet mode);
 * if that one is unmapped too, it is ignored.
 */
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		/* restore the saved terminal settings before acting on the key */
		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
		case 'd':
			/* display refresh delay, clamped to >= 1 second */
			prompt_integer(&top->delay_secs, "Enter display delay");
			if (top->delay_secs < 1)
				top->delay_secs = 1;
			break;
		case 'e':
			/*
			 * 0 entries means "track the terminal size": install a
			 * SIGWINCH handler; any other value disables it.
			 */
			prompt_integer(&top->print_entries, "Enter display entries (lines)");
			if (top->print_entries == 0) {
				struct sigaction act = {
					.sa_sigaction = perf_top__sig_winch,
					.sa_flags     = SA_SIGINFO,
				};
				perf_top__resize(top);
				sigaction(SIGWINCH, &act, NULL);
			} else {
				signal(SIGWINCH, SIG_DFL);
			}
			break;
		case 'E':
			if (top->evlist->nr_entries > 1) {
				/* Select 0 as the default event: */
				int counter = 0;

				fprintf(stderr, "\nAvailable events:");

				evlist__for_each_entry(top->evlist, top->sym_evsel)
					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

				prompt_integer(&counter, "Enter details event counter");

				if (counter >= top->evlist->nr_entries) {
					top->sym_evsel = perf_evlist__first(top->evlist);
					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
					sleep(1);
					break;
				}
				evlist__for_each_entry(top->evlist, top->sym_evsel)
					if (top->sym_evsel->idx == counter)
						break;
			} else
				top->sym_evsel = perf_evlist__first(top->evlist);
			break;
		case 'f':
			prompt_integer(&top->count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&top->sym_pcnt_filter,
				       "Enter details display event filter (percent)");
			break;
		case 'K':
			top->hide_kernel_symbols = !top->hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (top->dump_symtab)
				perf_session__fprintf_dsos(top->session, stderr);
			ret = false;
			break;
		case 's':
			perf_top__prompt_symbol(top, "Enter details symbol");
			break;
		case 'S':
			/* stop annotating and zero the symbol's counters */
			if (!top->sym_filter_entry)
				break;
			else {
				struct hist_entry *syme = top->sym_filter_entry;

				top->sym_filter_entry = NULL;
				__zero_source_counters(syme);
			}
			break;
		case 'U':
			top->hide_user_symbols = !top->hide_user_symbols;
			break;
		case 'z':
			top->zero = !top->zero;
			break;
		default:
			break;
	}

	return ret;
}

557 558 559
/*
 * TUI timer callback: refresh the selected evsel from the browser, age
 * (decay or delete) the previous entries while counting is enabled, then
 * resort the histogram for display.
 */
static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
	struct perf_evsel *evsel = t->sym_evsel;
	struct hists *hists;

	perf_top__reset_sample_counters(t);

	/* follow the evsel currently selected in the TUI browser */
	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	/* note: hists still refers to the evsel captured above */
	hists = evsel__hists(evsel);

	if (t->evlist->enabled) {
		if (t->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, t->hide_user_symbols,
					     t->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);
}

583
/*
 * Display thread body for TUI mode: seed the histogram once, propagate the
 * --uid filter to every evsel's hists, then hand control to the hists
 * browser until it returns, at which point 'done' is raised to stop the
 * main sampling loop.
 */
static void *display_thread_tui(void *arg)
{
	struct perf_evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * Zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      top->min_percent,
				      &top->session->header.env);

	done = 1;
	return NULL;
}

621 622 623 624 625 626 627
/* Termination signal handler: request a clean shutdown of all loops. */
static void display_sig(int sig __maybe_unused)
{
	done = 1;
}

/*
 * Install signal handlers for the display thread: dump a stack trace on
 * crashes (SIGSEGV/SIGFPE), shut down cleanly on INT/QUIT/TERM.
 */
static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT,  display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}

635
/*
 * Display thread body for stdio mode: repaint the symbol table every
 * delay_secs (or on SIGWINCH-interrupted poll), and dispatch key presses
 * to perf_top__handle_keypress() until it asks to quit or 'done' is set.
 * Terminal echo is disabled for the lifetime of the loop and restored on
 * exit and around each keypress.
 */
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash return */
	getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			__fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			done = 1;
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}

684 685 686 687 688 689 690 691
/*
 * Per-entry callback invoked by hist_entry_iter__add(): feed the sample
 * into the symbol's annotation histogram (only for the leaf entry when
 * sorting by symbol) and account branch-stack cycles. Always returns 0.
 */
static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, he, iter->sample, evsel->idx, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
		     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
	return 0;
}

700 701
/*
 * Process one PERF_RECORD_SAMPLE: resolve the address, emit one-shot
 * warnings for unresolvable guest/kernel samples (kptr_restrict, missing
 * or bad vmlinux), and add the sample to the evsel's histogram under
 * hists->lock. Samples in idle symbols are counted but not added.
 */
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	int err;

	/* guest sample whose machine we don't know: warn once per guest pid */
	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
				sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (machine__resolve(machine, &al, sample) < 0)
		return;

	/* kernel addresses hidden by kptr_restrict: warn once per machine */
	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
			  al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
			  " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];
				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample 	= sample,
			.add_entry_cb 	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		pthread_mutex_lock(&hists->lock);

		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		pthread_mutex_unlock(&hists->lock);
	}

	/* drop the thread/map references taken by machine__resolve() */
	addr_location__put(&al);
}

809
/*
 * Drain one mmap ring buffer: parse each event, route it by cpumode to
 * the right machine (host or guest), bump the per-origin sample counters,
 * and dispatch samples to perf_event__process_sample() and other record
 * types to machine__process_event(). Every event is consumed, including
 * ones skipped via next_event.
 */
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct perf_sample sample;
	struct perf_evsel *evsel;
	struct perf_session *session = top->session;
	union perf_event *event;
	struct machine *machine;
	int ret;

	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
			goto next_event;
		}

		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
		assert(evsel != NULL);

		if (event->header.type == PERF_RECORD_SAMPLE)
			++top->samples;

		switch (sample.cpumode) {
		case PERF_RECORD_MISC_USER:
			++top->us_samples;
			if (top->hide_user_symbols)
				goto next_event;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_KERNEL:
			++top->kernel_samples;
			if (top->hide_kernel_symbols)
				goto next_event;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_GUEST_KERNEL:
			++top->guest_kernel_samples;
			machine = perf_session__find_machine(session,
							     sample.pid);
			break;
		case PERF_RECORD_MISC_GUEST_USER:
			++top->guest_us_samples;
			/*
			 * TODO: we don't process guest user from host side
			 * except simple counting.
			 */
			goto next_event;
		default:
			/* unknown cpumode: only non-sample records proceed */
			if (event->header.type == PERF_RECORD_SAMPLE)
				goto next_event;
			machine = &session->machines.host;
			break;
		}


		if (event->header.type == PERF_RECORD_SAMPLE) {
			perf_event__process_sample(&top->tool, event, evsel,
						   &sample, machine);
		} else if (event->header.type < PERF_RECORD_MAX) {
			hists__inc_nr_events(evsel__hists(evsel), event->header.type);
			machine__process_event(machine, event, &sample);
		} else
			++session->evlist->stats.nr_unknown_events;
next_event:
		perf_evlist__mmap_consume(top->evlist, idx);
	}
}

877
static void perf_top__mmap_read(struct perf_top *top)
878
{
879 880
	int i;

881 882
	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
883 884
}

885
/*
 * Configure and open every event counter, then mmap the ring buffers.
 * On open failure, first try perf_evsel__fallback() (e.g. a weaker event
 * variant) and retry; otherwise report the error. Returns 0 on success,
 * -1 on any unrecoverable open/mmap failure.
 */
static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct perf_evsel *counter;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (perf_evsel__open(counter, top->evlist->cpus,
				     top->evlist->threads) < 0) {
			/* fallback may rewrite the evsel; retry the open */
			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			    errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

923
/*
 * Validate the callchain configuration against the sort order: callchains
 * require the "sym" sort key; when present and a chain mode is selected,
 * register the callchain parameters. Returns 0 or -EINVAL.
 */
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (!perf_hpp_list.sym) {
		if (callchain->enabled) {
			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
			return -EINVAL;
		}
	} else if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

940
/*
 * Main body of 'perf top': create the session, synthesize existing
 * threads, open/mmap the counters, spawn the display thread (TUI or
 * stdio) and then loop draining the ring buffers until 'done'. Returns 0
 * on clean exit, negative on setup failure. Cleanup is via the usual
 * goto-label chain; out_err_cpu_topo reports and reroutes to out_delete.
 */
static int __cmd_top(struct perf_top *top)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evsel_config_term *err_term;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;
	pthread_t thread;
	int ret;

	top->session = perf_session__new(NULL, false, NULL);
	if (top->session == NULL)
		return -1;

	/* no --objdump given: pick one suited to the session's arch */
	if (!objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env);
		if (ret)
			goto out_delete;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		goto out_delete;

	if (perf_session__register_idle_thread(top->session) < 0)
		goto out_delete;

	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();

	/* pick up the threads/maps that already exist before we start */
	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->threads, false,
				    opts->proc_map_timeout,
				    top->nr_threads_synthesize);

	if (top->nr_threads_synthesize > 1)
		perf_set_singlethreaded();

	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0)
			goto out_err_cpu_topo;
	}

	ret = perf_top__start_counters(top);
	if (ret)
		goto out_delete;

	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
	if (ret) {
		pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
			err_term->val.drv_cfg, perf_evsel__name(pos), errno,
			str_error_r(errno, msg, sizeof(msg)));
		goto out_delete;
	}

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
        if (!target__none(&opts->target))
                perf_evlist__enable(top->evlist);

	/* Wait for a minimal set of events before starting the snapshot */
	perf_evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	ret = -1;
	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							    display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_delete;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	/* main sampling loop: drain buffers, poll when idle, handle resize */
	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (hits == top->samples)
			ret = perf_evlist__poll(top->evlist, 100);

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_delete:
	perf_session__delete(top->session);
	top->session = NULL;

	return ret;

out_err_cpu_topo: {
	char errbuf[BUFSIZ];
	const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

	ui__error("Could not read the CPU topology map: %s\n", err);
	goto out_delete;
}
}

/*
 * --call-graph option callback: enable callchain display and delegate
 * record-mode parsing to the shared record option handler.
 */
static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}
1070

J
Jiri Olsa 已提交
1071 1072 1073
/*
 * -g option callback: default the record mode to frame pointers, or turn
 * callchains off entirely for --no-call-graph; otherwise hand the argument
 * string to the top-specific callchain parser.
 */
static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
		return 0;
	}

	return parse_callchain_top_opt(arg);
}
1090

1091
/*
 * perf_config() callback for 'top.*' configuration entries.
 *
 * NOTE(review): the reassignment of 'var' for "top.call-graph" is a dead
 * store as visible here — nothing reads 'var' afterwards except the
 * "top.children" comparison, which can no longer match. Upstream forwards
 * the renamed key to perf_default_config(); confirm against the rest of
 * the file before relying on top.call-graph taking effect.
 */
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "top.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}

	return 0;
}

1103 1104 1105 1106 1107 1108 1109 1110 1111 1112
/*
 * --percent-limit option callback: store the threshold below which
 * histogram entries are not displayed.
 */
static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;
	float limit = strtof(arg, NULL);

	top->min_percent = limit;
	return 0;
}

1113 1114
/* Combined help text for the --call-graph option, shared between the
 * record-side and report-side callchain syntax descriptions. */
const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
1115

1116
int cmd_top(int argc, const char **argv)
1117
{
1118
	char errbuf[BUFSIZ];
1119 1120 1121
	struct perf_top top = {
		.count_filter	     = 5,
		.delay_secs	     = 2,
1122 1123 1124 1125 1126
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
1127
			.target		= {
1128 1129
				.uses_mmap   = true,
			},
1130
			.proc_map_timeout    = 500,
N
Namhyung Kim 已提交
1131
		},
1132
		.max_stack	     = sysctl_perf_event_max_stack,
1133
		.sym_pcnt_filter     = 5,
1134
		.nr_threads_synthesize = UINT_MAX,
1135
	};
1136
	struct record_opts *opts = &top.record_opts;
1137
	struct target *target = &opts->target;
1138
	const struct option options[] = {
1139
	OPT_CALLBACK('e', "event", &top.evlist, "event",
1140
		     "event selector. use 'perf list' to list available events",
1141
		     parse_events_option),
1142 1143
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
1144
		    "profile events on existing process id"),
1145
	OPT_STRING('t', "tid", &target->tid, "tid",
1146
		    "profile events on existing thread id"),
1147
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
1148
			    "system-wide collection from all CPUs"),
1149
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
1150
		    "list of cpus to monitor"),
1151 1152
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
1153 1154
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
1155
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
1156
		    "hide kernel symbols"),
1157 1158 1159
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
1160
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
1161
		    "collect data with this RT SCHED_FIFO priority"),
1162
	OPT_INTEGER('d', "delay", &top.delay_secs,
1163
		    "number of seconds to delay between refreshes"),
1164
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
1165
			    "dump the symbol table used for profiling"),
1166
	OPT_INTEGER('f', "count-filter", &top.count_filter,
1167
		    "only display functions with more events than this"),
1168
	OPT_BOOLEAN(0, "group", &opts->group,
1169
			    "put the counters into a counter group"),
1170 1171
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
1172
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
1173
		    "symbol to annotate"),
1174 1175
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
1176
	OPT_INTEGER('E', "entries", &top.print_entries,
1177
		    "display this many functions"),
1178
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
1179
		    "hide user symbols"),
1180 1181
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
1182
	OPT_INCR('v', "verbose", &verbose,
1183
		    "be more verbose (show counter open errors, etc)"),
1184
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1185 1186
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer the man page for the complete list."),
1187 1188
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
1189 1190
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
1191
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
1192
			   NULL, "enables call-graph recording and display",
J
Jiri Olsa 已提交
1193
			   &callchain_opt),
1194
	OPT_CALLBACK(0, "call-graph", &callchain_param,
1195
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
1196
		     top_callchain_help, &parse_callchain_opt),
N
Namhyung Kim 已提交
1197 1198
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
1199 1200
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
1201
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
1202 1203 1204
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		   "ignore callees of these functions in call graphs",
		   report_parse_ignore_callees_opt),
1205 1206 1207 1208 1209 1210 1211 1212
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
1213 1214 1215 1216
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
1217 1218
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
1219 1220
	OPT_STRING(0, "objdump", &objdump_path, "path",
		    "objdump binary to use for disassembly and annotations"),
1221 1222
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
1223
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
1224 1225
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
N
Namhyung Kim 已提交
1226 1227
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
1228 1229 1230
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
1231 1232
	OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
1233 1234 1235 1236 1237 1238
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
1239 1240
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
N
Namhyung Kim 已提交
1241 1242
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
1243
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
1244 1245
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
			"number of thread to run event synthesize"),
1246
	OPT_END()
1247
	};
1248 1249 1250 1251
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
1252 1253 1254 1255
	int status = hists__init();

	if (status < 0)
		return status;
1256

1257
	top.evlist = perf_evlist__new();
1258
	if (top.evlist == NULL)
1259 1260
		return -ENOMEM;

1261 1262 1263
	status = perf_config(perf_top_config, &top);
	if (status)
		return status;
1264

1265 1266 1267 1268
	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

1269 1270 1271 1272 1273 1274
	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_delete_evlist;
	}

N
Namhyung Kim 已提交
1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287
	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

1288
	sort__mode = SORT_MODE__TOP;
1289
	/* display thread wants entries to be collapsed in a different tree */
1290
	perf_hpp_list.need_collapse = 1;
1291

1292 1293 1294 1295 1296 1297 1298
	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

1299
	if (setup_sorting(top.evlist) < 0) {
1300 1301 1302 1303 1304
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
1305 1306
		goto out_delete_evlist;
	}
1307

1308
	status = target__validate(target);
1309
	if (status) {
1310
		target__strerror(target, status, errbuf, BUFSIZ);
1311
		ui__warning("%s\n", errbuf);
1312 1313
	}

1314
	status = target__parse_uid(target);
1315 1316
	if (status) {
		int saved_errno = errno;
1317

1318
		target__strerror(target, status, errbuf, BUFSIZ);
1319
		ui__error("%s\n", errbuf);
1320 1321

		status = -saved_errno;
1322
		goto out_delete_evlist;
1323
	}
1324

1325
	if (target__none(target))
1326
		target->system_wide = true;
1327

1328 1329
	if (perf_evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
1330
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
1331
		goto out_delete_evlist;
1332
	}
1333

1334 1335
	symbol_conf.nr_events = top.evlist->nr_entries;

1336 1337
	if (top.delay_secs < 1)
		top.delay_secs = 1;
1338

1339
	if (record_opts__config(opts)) {
1340
		status = -EINVAL;
1341
		goto out_delete_evlist;
1342 1343
	}

1344
	top.sym_evsel = perf_evlist__first(top.evlist);
1345

1346
	if (!callchain_param.enabled) {
N
Namhyung Kim 已提交
1347 1348 1349 1350
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

1351 1352 1353
	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

1354 1355 1356
	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;
1357 1358

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1359
	if (symbol__init(NULL) < 0)
1360 1361
		return -1;

1362
	sort__setup_elide(stdout);
1363

1364
	get_term_dimensions(&top.winsize);
1365
	if (top.print_entries == 0) {
1366 1367 1368 1369 1370 1371
		struct sigaction act = {
			.sa_sigaction = perf_top__sig_winch,
			.sa_flags     = SA_SIGINFO,
		};
		perf_top__update_print_entries(&top);
		sigaction(SIGWINCH, &act, NULL);
1372 1373
	}

1374
	status = __cmd_top(&top);
1375

1376
out_delete_evlist:
1377
	perf_evlist__delete(top.evlist);
1378 1379

	return status;
1380
}