/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static struct perf_top top = {
	.count_filter		= 5,
	.delay_secs		= 2,
	.display_weighted	= -1,
	.target_pid		= -1,
	.target_tid		= -1,
	.active_symbols		= LIST_HEAD_INIT(top.active_symbols),
	.active_symbols_lock	= PTHREAD_MUTEX_INITIALIZER,
	.active_symbols_cond	= PTHREAD_COND_INITIALIZER,
	.freq			= 1000, /* 1 KHz */
};

static bool			system_wide			=  false;

static bool			use_tui, use_stdio;

static int			default_interval		=      0;

static bool			inherit				=  false;
static int			realtime_prio			=      0;
static bool			group				=  false;
static unsigned int		page_size;
static unsigned int		mmap_pages			=    128;

static bool			dump_symtab                     =  false;

static struct winsize		winsize;

static const char		*sym_filter			=   NULL;
struct sym_entry		*sym_filter_entry_sched		=   NULL;
static int			sym_pcnt_filter			=      5;

/*
 * Source functions
 */

void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
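	/* No usable LINES/COLUMNS: ask the tty, then fall back to 80x25. */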
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

static void update_print_entries(struct winsize *ws)
{
	top.print_entries = ws->ws_row;
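	/*
	 * Leave a few rows for the header that print_sym_table() emits
	 * above the symbol list.
	 */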

	if (top.print_entries > 9)
		top.print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!syme)
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->origin == DSO__ORIG_KERNEL) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
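	/*
	 * Already annotated before?  Just re-select it.  Otherwise allocate
	 * the per-event histograms and run the annotation, all under
	 * notes->lock so readers never see a half-initialized annotation.
	 */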
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, syme->map, 0);
	if (err == 0) {
out_assign:
		top.sym_filter_entry = syme;
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct sym_entry *syme)
{
	struct symbol *sym = sym_entry__symbol(syme);
	symbol__annotate_zero_histograms(sym);
}

static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym;

	if (syme != top.sym_filter_entry)
		return;

	sym = sym_entry__symbol(syme);
	notes = symbol__annotation(sym);
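	/* Never block the sample-reading path on the annotation lock. */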

	if (pthread_mutex_trylock(&notes->lock))
		return;

	ip = syme->map->map_ip(syme->map, ip);
	symbol__inc_addr_samples(sym, syme->map, counter, ip);

	pthread_mutex_unlock(&notes->lock);
}

static void show_details(struct sym_entry *syme)
{
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!syme)
		return;

	symbol = sym_entry__symbol(syme);
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, syme->map, top.sym_evsel->idx,
				       0, sym_pcnt_filter, top.print_entries, 4);
	if (top.zero)
		symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
	else
		symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

static const char		CONSOLE_CLEAR[] = "\033[H\033[2J"; /* ANSI: home cursor, clear screen */

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &top.active_symbols);
}

static void print_sym_table(struct perf_session *session)
{
	char bf[160];
	int printed = 0;
	struct rb_node *nd;
	struct sym_entry *syme;
	struct rb_root tmp = RB_ROOT;
	const int win_width = winsize.ws_col - 1;
	int sym_width, dso_width, dso_short_width;
	float sum_ksamples = perf_top__decay_samples(&top, &tmp);
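	/* decay_samples() ages the per-symbol counts and sorts the active symbols into tmp. */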

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(&top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(&top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (session->hists.stats.total_lost != 0) {
		color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
		printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
		       session->hists.stats.total_lost);
	}

	if (top.sym_filter_entry) {
		show_details(top.sym_filter_entry);
		return;
	}

	perf_top__find_widths(&top, &tmp, &dso_width, &dso_short_width,
			      &sym_width);

	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (top.evlist->nr_entries == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       top.evlist->nr_entries == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > top.print_entries ||
		    (int)syme->snap_count < top.count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (top.evlist->nr_entries == 1 || !top.display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016" PRIx64, sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		*target = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&top.active_symbols_lock);
	syme = list_entry(top.active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&top.active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &top.active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

static void print_mapped_keys(void)
{
	char *name = NULL;

	if (top.sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top.delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top.print_entries);

	if (top.evlist->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top.sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top.count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (top.evlist->nr_entries > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", top.display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel_symbols symbols.     \t(%s)\n",
		top.hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		top.hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top.zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
		case 'w':
			return top.evlist->nr_entries > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
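		/*
		 * Not a mapped key: show the help, put the terminal in
		 * non-canonical mode and read one more key; give up if it
		 * is still unmapped.
		 */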
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&top.delay_secs, "Enter display delay");
			if (top.delay_secs < 1)
				top.delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&top.print_entries, "Enter display entries (lines)");
			if (top.print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (top.evlist->nr_entries > 1) {
				fprintf(stderr, "\nAvailable events:");

				list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
					fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));

				prompt_integer(&top.sym_counter, "Enter details event counter");

				if (top.sym_counter >= top.evlist->nr_entries) {
					top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
					top.sym_counter = 0;
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
					sleep(1);
					break;
				}
				list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
					if (top.sym_evsel->idx == top.sym_counter)
						break;
			} else top.sym_counter = 0;
			break;
		case 'f':
			prompt_integer(&top.count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			top.hide_kernel_symbols = !top.hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				perf_session__fprintf_dsos(session, stderr);
			exit(0);
		case 's':
			prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!top.sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = top.sym_filter_entry;

				top.sym_filter_entry = NULL;
				__zero_source_counters(syme);
			}
			break;
		case 'U':
			top.hide_user_symbols = !top.hide_user_symbols;
			break;
		case 'w':
			top.display_weighted = ~top.display_weighted;
			break;
		case 'z':
			top.zero = !top.zero;
			break;
		default:
			break;
	}
}

static void *display_thread_tui(void *arg __used)
{
	int err = 0;
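	/*
	 * Block until the reader thread has inserted the first active
	 * symbol, then hand control over to the TUI browser.
	 */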
	pthread_mutex_lock(&top.active_symbols_lock);
	while (list_empty(&top.active_symbols)) {
		err = pthread_cond_wait(&top.active_symbols_cond,
					&top.active_symbols_lock);
		if (err)
			break;
	}
	pthread_mutex_unlock(&top.active_symbols_lock);
	if (!err)
		perf_top__tui_browser(&top);
	exit_browser(0);
	exit(0);
	return NULL;
}

static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = top.delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	do {
		print_sym_table(session);
	} while (poll(&stdin_poll, 1, delay_msecs) == 0);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"native_safe_halt",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;
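	/* Skip uninteresting boundary symbols: section markers and module init/exit entry points. */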

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	symbol__annotate_init(map, sym);

	if (!top.sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			syme->skip = 1;
			break;
		}
	}

	return 0;
}

static void perf_event__process_sample(const union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_session *session)
{
	u64 ip = event->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++top.samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++top.us_samples;
		if (top.hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top.kernel_samples;
		if (top.hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top.guest_kernel_samples;
		machine = perf_session__find_machine(session, event->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++top.guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			event->ip.pid);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top.exact_samples++;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			ui__warning("The %s file can't be used\n",
				    symbol_conf.vmlinux_name);
			exit_browser(0);
			exit(1);
		}

		return;
	}

	/* Let's see whether we need to install the initial sym_filter_entry. */
	if (sym_filter_entry_sched) {
		top.sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(top.sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (top.sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!syme->skip) {
		struct perf_evsel *evsel;

		syme->origin = origin;
		evsel = perf_evlist__id2evsel(top.evlist, sample->id);
		assert(evsel != NULL);
		syme->count[evsel->idx]++;
		record_precise_ip(syme, evsel->idx, ip);
		pthread_mutex_lock(&top.active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next) {
			static bool first = true;
			__list_insert_active_sym(syme);
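			/*
			 * The first insertion wakes up the TUI thread waiting
			 * in display_thread_tui().
			 */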
			if (first) {
				pthread_cond_broadcast(&top.active_symbols_cond);
				first = false;
			}
		}
		pthread_mutex_unlock(&top.active_symbols_lock);
	}
}

static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
{
	struct perf_sample sample;
	union perf_event *event;

	while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
		perf_session__parse_sample(self, event, &sample);

		if (event->header.type == PERF_RECORD_SAMPLE)
			perf_event__process_sample(event, &sample, self);
		else
			perf_event__process(event, &sample, self);
	}
}

static void perf_session__mmap_read(struct perf_session *self)
{
	int i;

	for (i = 0; i < top.evlist->cpus->nr; i++)
		perf_session__mmap_read_cpu(self, i);
}

static void start_counters(struct perf_evlist *evlist)
{
	struct perf_evsel *counter;

	list_for_each_entry(counter, &evlist->entries, node) {
		struct perf_event_attr *attr = &counter->attr;

		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

		if (top.freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq	  = 1;
			attr->sample_freq = top.freq;
		}

		if (evlist->nr_entries > 1) {
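			/*
			 * Sample IDs let perf_evlist__id2evsel() route each
			 * sample back to the event that generated it.
			 */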
			attr->sample_type |= PERF_SAMPLE_ID;
			attr->read_format |= PERF_FORMAT_ID;
		}

		attr->mmap = 1;
try_again:
		if (perf_evsel__open(counter, top.evlist->cpus,
				     top.evlist->threads, group, inherit) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES)
				die("Permission error - are you root?\n"
					"\t Consider tweaking"
					" /proc/sys/kernel/perf_event_paranoid.\n");
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE &&
			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {

				if (verbose)
					warning(" ... trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}
			printf("\n");
			error("sys_perf_event_open() syscall returned with %d "
			      "(%s).  /bin/dmesg may provide additional information.\n",
			      err, strerror(err));
			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
			exit(-1);
		}
	}

	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
		die("failed to mmap with %d (%s)\n", errno, strerror(errno));
}

static int __cmd_top(void)
{
	pthread_t thread;
	struct perf_evsel *first;
	int ret __used;
	/*
	 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
	 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (session == NULL)
		return -ENOMEM;

	if (top.target_tid != -1)
		perf_event__synthesize_thread_map(top.evlist->threads,
						  perf_event__process, session);
	else
		perf_event__synthesize_threads(perf_event__process, session);

	start_counters(top.evlist);
	first = list_entry(top.evlist->entries.next, struct perf_evsel, node);
	perf_session__set_sample_type(session, first->attr.sample_type);

	/* Wait for a minimal set of events before starting the snapshot */
	poll(top.evlist->pollfd, top.evlist->nr_fds, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		u64 hits = top.samples;

		perf_session__mmap_read(session);
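		/* Sleep in poll() only if the last pass found no new samples. */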

		if (hits == top.samples)
			ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &top.target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &top.target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &top.freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;

	top.evlist = perf_evlist__new(NULL, NULL);
	if (top.evlist == NULL)
		return -ENOMEM;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/*
	 * XXX For now start disabled, only using TUI if explicitly asked for.
	 * Change that when handle_keys equivalent gets written, live annotation
	 * done, etc.
	 */
	use_browser = 0;

	if (use_stdio)
		use_browser = 0;
	else if (use_tui)
		use_browser = 1;

	setup_browser(false);

	/* CPU and PID are mutually exclusive */
	if (top.target_tid > 0 && top.cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		top.cpu_list = NULL;
	}

	if (top.target_pid != -1)
		top.target_tid = top.target_pid;

	if (perf_evlist__create_maps(top.evlist, top.target_pid,
				     top.target_tid, top.cpu_list) < 0)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		top.freq = 0;
	else if (top.freq) {
		default_interval = top.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	list_for_each_entry(pos, &top.evlist->entries, node) {
		if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
					 top.evlist->threads->nr) < 0)
			goto out_free_fd;
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (pos->attr.sample_period)
			continue;

		pos->attr.sample_period = default_interval;
	}

	if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
	    perf_evlist__alloc_mmap(top.evlist) < 0)
		goto out_free_fd;

	top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);

	symbol_conf.priv_size = (sizeof(struct sym_entry) + sizeof(struct annotation) +
				 (top.evlist->nr_entries + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	get_term_dimensions(&winsize);
	if (top.print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	status = __cmd_top();
out_free_fd:
	perf_evlist__delete(top.evlist);

	return status;
}