/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"

#include "perf.h"

#include "util/annotate.h"
#include "util/cache.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"

#include "util/debug.h"

#include <assert.h>
#include <fcntl.h>

#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>

#include <errno.h>
#include <time.h>
#include <sched.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/mman.h>

#include <linux/unistd.h>
#include <linux/types.h>

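/*
 * Shorthand for the per-event file descriptor stored in the evsel's
 * xyarray, indexed by (cpu, thread).
 */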
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

static struct perf_top top = {
	.count_filter		= 5,
	.delay_secs		= 2,
	.display_weighted	= -1,
	.target_pid		= -1,
	.target_tid		= -1,
	.active_symbols		= LIST_HEAD_INIT(top.active_symbols),
	.active_symbols_lock	= PTHREAD_MUTEX_INITIALIZER,
	.active_symbols_cond	= PTHREAD_COND_INITIALIZER,
	.freq			= 1000, /* 1 KHz */
};

static bool			system_wide			=  false;

static bool			use_tui, use_stdio;

static int			default_interval		=      0;

static bool			inherit				=  false;
static int			realtime_prio			=      0;
static bool			group				=  false;
static unsigned int		page_size;
static unsigned int		mmap_pages			=    128;

static bool			dump_symtab                     =  false;

static struct winsize		winsize;

static const char		*sym_filter			=   NULL;
struct sym_entry		*sym_filter_entry_sched		=   NULL;
static int			sym_pcnt_filter			=      5;

/*
 * Source functions
 */

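/*
 * Resolve the terminal size: LINES/COLUMNS from the environment first,
 * then the TIOCGWINSZ ioctl on stdout, falling back to 80x25.
 */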
void get_term_dimensions(struct winsize *ws)
{
	char *s = getenv("LINES");

	if (s != NULL) {
		ws->ws_row = atoi(s);
		s = getenv("COLUMNS");
		if (s != NULL) {
			ws->ws_col = atoi(s);
			if (ws->ws_row && ws->ws_col)
				return;
		}
	}
#ifdef TIOCGWINSZ
	if (ioctl(1, TIOCGWINSZ, ws) == 0 &&
	    ws->ws_row && ws->ws_col)
		return;
#endif
	ws->ws_row = 25;
	ws->ws_col = 80;
}

static void update_print_entries(struct winsize *ws)
{
	top.print_entries = ws->ws_row;

	if (top.print_entries > 9)
		top.print_entries -= 9;
}

static void sig_winch_handler(int sig __used)
{
	get_term_dimensions(&winsize);
	update_print_entries(&winsize);
}

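/*
 * Set up annotation for the symbol selected with -s/'s': allocate its
 * per-event histograms, run the annotation pass and publish it as
 * top.sym_filter_entry.  Symbols resolved only via /proc/kallsyms
 * cannot be annotated.
 */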
static int parse_source(struct sym_entry *syme)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!syme)
		return -1;

	sym = sym_entry__symbol(syme);
	map = syme->map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == SYMTAB__KALLSYMS) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym, top.evlist->nr_entries) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(sym, syme->map, 0);
	if (err == 0) {
out_assign:
		top.sym_filter_entry = syme;
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct sym_entry *syme)
{
	struct symbol *sym = sym_entry__symbol(syme);
	symbol__annotate_zero_histograms(sym);
}

static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip)
{
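	/*
	 * Only the symbol currently being annotated is of interest here;
	 * trylock keeps this sampling path from blocking on the display
	 * thread, at the cost of occasionally dropping a sample.
	 */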
	struct annotation *notes;
	struct symbol *sym;

	if (syme != top.sym_filter_entry)
		return;

	sym = sym_entry__symbol(syme);
	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	ip = syme->map->map_ip(syme->map, ip);
	symbol__inc_addr_samples(sym, syme->map, counter, ip);

	pthread_mutex_unlock(&notes->lock);
}

static void show_details(struct sym_entry *syme)
{
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!syme)
		return;

	symbol = sym_entry__symbol(syme);
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", event_name(top.sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, syme->map, top.sym_evsel->idx,
				       0, sym_pcnt_filter, top.print_entries, 4);
	if (top.zero)
		symbol__annotate_zero_histogram(symbol, top.sym_evsel->idx);
	else
		symbol__annotate_decay_histogram(symbol, top.sym_evsel->idx);
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

static const char		CONSOLE_CLEAR[] = "\033[H\033[2J";	/* ANSI: cursor home, clear screen */

static void __list_insert_active_sym(struct sym_entry *syme)
{
	list_add(&syme->node, &top.active_symbols);
}

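/*
 * One refresh of the stdio view: decay the per-symbol counters into a
 * temporary rb-tree, print the summary header and column banner, then
 * one line per symbol that passes count_filter, up to print_entries.
 */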
static void print_sym_table(struct perf_session *session)
{
	char bf[160];
	int printed = 0;
	struct rb_node *nd;
	struct sym_entry *syme;
	struct rb_root tmp = RB_ROOT;
	const int win_width = winsize.ws_col - 1;
	int sym_width, dso_width, dso_short_width;
	float sum_ksamples = perf_top__decay_samples(&top, &tmp);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(&top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(&top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (session->hists.stats.total_lost != 0) {
		color_fprintf(stdout, PERF_COLOR_RED, "WARNING:");
		printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n",
		       session->hists.stats.total_lost);
	}

	if (top.sym_filter_entry) {
		show_details(top.sym_filter_entry);
		return;
	}

	perf_top__find_widths(&top, &tmp, &dso_width, &dso_short_width,
			      &sym_width);

	if (sym_width + dso_width > winsize.ws_col - 29) {
		dso_width = dso_short_width;
		if (sym_width + dso_width > winsize.ws_col - 29)
			sym_width = winsize.ws_col - dso_width - 29;
	}
	putchar('\n');
	if (top.evlist->nr_entries == 1)
		printf("             samples  pcnt");
	else
		printf("   weight    samples  pcnt");

	if (verbose)
		printf("         RIP       ");
	printf(" %-*.*s DSO\n", sym_width, sym_width, "function");
	printf("   %s    _______ _____",
	       top.evlist->nr_entries == 1 ? "      " : "______");
	if (verbose)
		printf(" ________________");
	printf(" %-*.*s", sym_width, sym_width, graph_line);
	printf(" %-*.*s", dso_width, dso_width, graph_line);
	puts("\n");

	for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
		struct symbol *sym;
		double pcnt;

		syme = rb_entry(nd, struct sym_entry, rb_node);
		sym = sym_entry__symbol(syme);
		if (++printed > top.print_entries ||
		    (int)syme->snap_count < top.count_filter)
			continue;

		pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
					 sum_ksamples));

		if (top.evlist->nr_entries == 1 || !top.display_weighted)
			printf("%20.2f ", syme->weight);
		else
			printf("%9.1f %10ld ", syme->weight, syme->snap_count);

		percent_color_fprintf(stdout, "%4.1f%%", pcnt);
		if (verbose)
			printf(" %016" PRIx64, sym->start);
		printf(" %-*.*s", sym_width, sym_width, sym->name);
		printf(" %-*.*s\n", dso_width, dso_width,
		       dso_width >= syme->map->dso->long_name_len ?
					syme->map->dso->long_name :
					syme->map->dso->short_name);
	}
}

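/*
 * stdin helpers for the interactive prompts below: read a line, strip
 * the newline and validate/convert it for the key handlers.
 */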
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void prompt_symbol(struct sym_entry **target, const char *msg)
{
	char *buf = malloc(0), *p;
	struct sym_entry *syme = *target, *n, *found = NULL;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		*target = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	pthread_mutex_lock(&top.active_symbols_lock);
	syme = list_entry(top.active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&top.active_symbols_lock);

	list_for_each_entry_safe_from(syme, n, &top.active_symbols, node) {
		struct symbol *sym = sym_entry__symbol(syme);

		if (!strcmp(buf, sym->name)) {
			found = syme;
			break;
		}
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
		return;
	} else
		parse_source(found);

out_free:
	free(buf);
}

static void print_mapped_keys(void)
{
	char *name = NULL;

	if (top.sym_filter_entry) {
		struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top.delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top.print_entries);

	if (top.evlist->nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", event_name(top.sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top.count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", sym_pcnt_filter);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	if (top.evlist->nr_entries > 1)
		fprintf(stdout, "\t[w]     toggle display weighted/count[E]r. \t(%d)\n", top.display_weighted ? 1 : 0);

	fprintf(stdout,
		"\t[K]     hide kernel symbols.             \t(%s)\n",
		top.hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		top.hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top.zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int key_mapped(int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
		case 'w':
			return top.evlist->nr_entries > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

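/*
 * Act on one interactive key.  For unmapped keys, print the help text
 * and read a fresh key with the terminal in non-canonical, no-echo mode
 * before dispatching.
 */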
static void handle_keypress(struct perf_session *session, int c)
{
	if (!key_mapped(c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios tc, save;

		print_mapped_keys();
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		tcgetattr(0, &save);
		tc = save;
		tc.c_lflag &= ~(ICANON | ECHO);
		tc.c_cc[VMIN] = 0;
		tc.c_cc[VTIME] = 0;
		tcsetattr(0, TCSANOW, &tc);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!key_mapped(c))
			return;
	}

	switch (c) {
		case 'd':
			prompt_integer(&top.delay_secs, "Enter display delay");
			if (top.delay_secs < 1)
				top.delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&top.print_entries, "Enter display entries (lines)");
			if (top.print_entries == 0) {
				sig_winch_handler(SIGWINCH);
				signal(SIGWINCH, sig_winch_handler);
			} else
				signal(SIGWINCH, SIG_DFL);
			break;
		case 'E':
			if (top.evlist->nr_entries > 1) {
				/* Select 0 as the default event: */
				int counter = 0;

				fprintf(stderr, "\nAvailable events:");

				list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
					fprintf(stderr, "\n\t%d %s", top.sym_evsel->idx, event_name(top.sym_evsel));

				prompt_integer(&counter, "Enter details event counter");

				if (counter >= top.evlist->nr_entries) {
					top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(top.sym_evsel));
					sleep(1);
					break;
				}
				list_for_each_entry(top.sym_evsel, &top.evlist->entries, node)
					if (top.sym_evsel->idx == counter)
						break;
			} else
				top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);
			break;
		case 'f':
			prompt_integer(&top.count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&sym_pcnt_filter, "Enter details display event filter (percent)");
			break;
		case 'K':
			top.hide_kernel_symbols = !top.hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (dump_symtab)
				perf_session__fprintf_dsos(session, stderr);
			exit(0);
		case 's':
			prompt_symbol(&top.sym_filter_entry, "Enter details symbol");
			break;
		case 'S':
			if (!top.sym_filter_entry)
				break;
			else {
				struct sym_entry *syme = top.sym_filter_entry;

				top.sym_filter_entry = NULL;
				__zero_source_counters(syme);
			}
			break;
		case 'U':
			top.hide_user_symbols = !top.hide_user_symbols;
			break;
		case 'w':
			top.display_weighted = ~top.display_weighted;
			break;
		case 'z':
			top.zero = !top.zero;
			break;
		default:
			break;
	}
}

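/*
 * TUI display thread: wait until the sampling side has inserted at least
 * one active symbol, then hand control over to the TUI browser.
 */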
static void *display_thread_tui(void *arg __used)
{
	int err = 0;
	pthread_mutex_lock(&top.active_symbols_lock);
	while (list_empty(&top.active_symbols)) {
		err = pthread_cond_wait(&top.active_symbols_cond,
					&top.active_symbols_lock);
		if (err)
			break;
	}
	pthread_mutex_unlock(&top.active_symbols_lock);
	if (!err)
		perf_top__tui_browser(&top);
	exit_browser(0);
	exit(0);
	return NULL;
}

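/*
 * stdio display thread: disable canonical mode and echo, reprint the
 * symbol table every delay_secs until a key arrives, handle it and
 * start over.
 */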
static void *display_thread(void *arg __used)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios tc, save;
	int delay_msecs, c;
	struct perf_session *session = (struct perf_session *) arg;

	tcgetattr(0, &save);
	tc = save;
	tc.c_lflag &= ~(ICANON | ECHO);
	tc.c_cc[VMIN] = 0;
	tc.c_cc[VTIME] = 0;

repeat:
	delay_msecs = top.delay_secs * 1000;
	tcsetattr(0, TCSANOW, &tc);
	/* trash return */
	getc(stdin);

	do {
		print_sym_table(session);
	} while (!poll(&stdin_poll, 1, delay_msecs) == 1);

	c = getc(stdin);
	tcsetattr(0, TCSAFLUSH, &save);

	handle_keypress(session, c);
	goto repeat;

	return NULL;
}

/* Tag samples to be skipped. */
static const char *skip_symbols[] = {
	"default_idle",
	"native_safe_halt",
	"cpu_idle",
	"enter_idle",
	"exit_idle",
	"mwait_idle",
	"mwait_idle_with_hints",
	"poll_idle",
	"ppc64_runlatch_off",
	"pseries_dedicated_idle_sleep",
	NULL
};

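/*
 * Called for every symbol while DSOs are lazily loaded: skip linker
 * boundary symbols, mark the idle routines above as ->ignore, hook up
 * the per-symbol private area and catch the -s/--sym-annotate target.
 */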
static int symbol_filter(struct map *map, struct symbol *sym)
{
	struct sym_entry *syme;
	const char *name = sym->name;
	int i;

	/*
	 * ppc64 uses function descriptors and appends a '.' to the
	 * start of every instruction address. Remove it.
	 */
	if (name[0] == '.')
		name++;

	if (!strcmp(name, "_text") ||
	    !strcmp(name, "_etext") ||
	    !strcmp(name, "_sinittext") ||
	    !strncmp("init_module", name, 11) ||
	    !strncmp("cleanup_module", name, 14) ||
	    strstr(name, "_text_start") ||
	    strstr(name, "_text_end"))
		return 1;

	syme = symbol__priv(sym);
	syme->map = map;
	symbol__annotate_init(map, sym);

	if (!top.sym_filter_entry && sym_filter && !strcmp(name, sym_filter)) {
		/* schedule initial sym_filter_entry setup */
		sym_filter_entry_sched = syme;
		sym_filter = NULL;
	}

	for (i = 0; skip_symbols[i]; i++) {
		if (!strcmp(skip_symbols[i], name)) {
			sym->ignore = true;
			break;
		}
	}

	return 0;
}

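/*
 * Per-sample path: classify the sample by cpumode to pick the right
 * machine, resolve the ip to a map/symbol, then bump that symbol's
 * per-event count, adding it to the active list on its first hit.
 */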
static void perf_event__process_sample(const union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_session *session)
{
	u64 ip = event->ip.ip;
	struct sym_entry *syme;
	struct addr_location al;
	struct machine *machine;
	u8 origin = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	++top.samples;

	switch (origin) {
	case PERF_RECORD_MISC_USER:
		++top.us_samples;
		if (top.hide_user_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top.kernel_samples;
		if (top.hide_kernel_symbols)
			return;
		machine = perf_session__find_host_machine(session);
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top.guest_kernel_samples;
		machine = perf_session__find_machine(session, event->ip.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++top.guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		return;
	default:
		return;
	}

	if (!machine && perf_guest) {
		pr_err("Can't find guest [%d]'s kernel information\n",
			event->ip.pid);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top.exact_samples++;

	if (perf_event__preprocess_sample(event, session, &al, sample,
					  symbol_filter) < 0 ||
	    al.filtered)
		return;

	if (al.sym == NULL) {
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			ui__warning("The %s file can't be used\n",
				    symbol_conf.vmlinux_name);
			exit_browser(0);
			exit(1);
		}

		return;
	}

	/* let's see, whether we need to install initial sym_filter_entry */
	if (sym_filter_entry_sched) {
		top.sym_filter_entry = sym_filter_entry_sched;
		sym_filter_entry_sched = NULL;
		if (parse_source(top.sym_filter_entry) < 0) {
			struct symbol *sym = sym_entry__symbol(top.sym_filter_entry);

			pr_err("Can't annotate %s", sym->name);
			if (top.sym_filter_entry->map->dso->symtab_type == SYMTAB__KALLSYMS) {
				pr_err(": No vmlinux file was found in the path:\n");
				machine__fprintf_vmlinux_path(machine, stderr);
			} else
				pr_err(".\n");
			exit(1);
		}
	}

	syme = symbol__priv(al.sym);
	if (!al.sym->ignore) {
		struct perf_evsel *evsel;

		evsel = perf_evlist__id2evsel(top.evlist, sample->id);
		assert(evsel != NULL);
		syme->count[evsel->idx]++;
		record_precise_ip(syme, evsel->idx, ip);
		pthread_mutex_lock(&top.active_symbols_lock);
		if (list_empty(&syme->node) || !syme->node.next) {
			static bool first = true;
			__list_insert_active_sym(syme);
			if (first) {
				pthread_cond_broadcast(&top.active_symbols_cond);
				first = false;
			}
		}
		pthread_mutex_unlock(&top.active_symbols_lock);
	}
}

static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
{
	struct perf_sample sample;
	union perf_event *event;

	while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
		perf_session__parse_sample(self, event, &sample);

		if (event->header.type == PERF_RECORD_SAMPLE)
			perf_event__process_sample(event, &sample, self);
		else
			perf_event__process(event, &sample, self);
	}
}

static void perf_session__mmap_read(struct perf_session *self)
{
	int i;

	for (i = 0; i < top.evlist->cpus->nr; i++)
		perf_session__mmap_read_cpu(self, i);
}

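/*
 * Configure and open one counter per event: sample on IP/TID (plus
 * period and sample ID when needed), fall back from hardware cycles to
 * the cpu-clock software event if the PMU open fails, then mmap the
 * ring buffers.
 */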
static void start_counters(struct perf_evlist *evlist)
{
	struct perf_evsel *counter;

	list_for_each_entry(counter, &evlist->entries, node) {
		struct perf_event_attr *attr = &counter->attr;

		attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;

		if (top.freq) {
			attr->sample_type |= PERF_SAMPLE_PERIOD;
			attr->freq	  = 1;
			attr->sample_freq = top.freq;
		}

		if (evlist->nr_entries > 1) {
			attr->sample_type |= PERF_SAMPLE_ID;
			attr->read_format |= PERF_FORMAT_ID;
		}

		attr->mmap = 1;
		attr->inherit = inherit;
try_again:
		if (perf_evsel__open(counter, top.evlist->cpus,
				     top.evlist->threads, group) < 0) {
			int err = errno;

			if (err == EPERM || err == EACCES) {
				ui__warning_paranoid();
				goto out_err;
			}
			/*
			 * If it's cycles then fall back to hrtimer
			 * based cpu-clock-tick sw counter, which
			 * is always available even if no PMU support:
			 */
			if (attr->type == PERF_TYPE_HARDWARE &&
			    attr->config == PERF_COUNT_HW_CPU_CYCLES) {
				if (verbose)
					ui__warning("Cycles event not supported,\n"
						    "trying to fall back to cpu-clock-ticks\n");

				attr->type = PERF_TYPE_SOFTWARE;
				attr->config = PERF_COUNT_SW_CPU_CLOCK;
				goto try_again;
			}

			if (err == ENOENT) {
				ui__warning("The %s event is not supported.\n",
					    event_name(counter));
				goto out_err;
			}

			ui__warning("The sys_perf_event_open() syscall "
				    "returned with %d (%s).  /bin/dmesg "
				    "may provide additional information.\n"
				    "No CONFIG_PERF_EVENTS=y kernel support "
				    "configured?\n", err, strerror(err));
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, mmap_pages, false) < 0) {
		ui__warning("Failed to mmap with %d (%s)\n",
			    errno, strerror(errno));
		goto out_err;
	}

	return;

out_err:
	exit_browser(0);
	exit(0);
}

static int __cmd_top(void)
{
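	/*
	 * Synthesize the existing threads, start the counters and the display
	 * thread, then keep draining the mmap buffers, polling briefly
	 * whenever no new samples came in.
	 */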
	pthread_t thread;
	int ret __used;
	/*
	 * FIXME: perf_session__new should allow passing an O_MMAP, so that all this
	 * mmap reading, etc. is encapsulated in it. Use O_WRONLY for now.
	 */
	struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL);
	if (session == NULL)
		return -ENOMEM;

	if (top.target_tid != -1)
		perf_event__synthesize_thread_map(top.evlist->threads,
						  perf_event__process, session);
	else
		perf_event__synthesize_threads(perf_event__process, session);

	start_counters(top.evlist);
	session->evlist = top.evlist;
	perf_session__update_sample_type(session);

	/* Wait for a minimal set of events before starting the snapshot */
	poll(top.evlist->pollfd, top.evlist->nr_fds, 100);

	perf_session__mmap_read(session);

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), session)) {
		printf("Could not create display thread.\n");
		exit(-1);
	}

	if (realtime_prio) {
		struct sched_param param;

		param.sched_priority = realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			printf("Could not set realtime priority.\n");
			exit(-1);
		}
	}

	while (1) {
		u64 hits = top.samples;

		perf_session__mmap_read(session);

		if (hits == top.samples)
			ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100);
	}

	return 0;
}

static const char * const top_usage[] = {
	"perf top [<options>]",
	NULL
};

static const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events),
	OPT_INTEGER('c', "count", &default_interval,
		    "event period to sample"),
	OPT_INTEGER('p', "pid", &top.target_pid,
		    "profile events on existing process id"),
	OPT_INTEGER('t', "tid", &top.target_tid,
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &top.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_UINTEGER('m', "mmap-pages", &mmap_pages, "number of mmap data pages"),
	OPT_INTEGER('r', "realtime", &realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('g', "group", &group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "inherit", &inherit,
		    "child tasks inherit counters"),
	OPT_STRING('s', "sym-annotate", &sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero,
		    "zero history across updates"),
	OPT_INTEGER('F', "freq", &top.freq,
		    "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_END()
};

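/*
 * Entry point: build the evlist, parse the options, pick stdio vs TUI,
 * create the cpu/thread maps and the default event, size the symbol
 * private area for sym_entry + annotation data, then run __cmd_top().
 */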
int cmd_top(int argc, const char **argv, const char *prefix __used)
{
	struct perf_evsel *pos;
	int status = -ENOMEM;

	top.evlist = perf_evlist__new(NULL, NULL);
	if (top.evlist == NULL)
		return -ENOMEM;

	page_size = sysconf(_SC_PAGE_SIZE);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/*
	 * XXX For now start disabled, only using TUI if explicitly asked for.
	 * Change that when handle_keys equivalent gets written, live annotation
	 * done, etc.
	 */
	use_browser = 0;

	if (use_stdio)
		use_browser = 0;
	else if (use_tui)
		use_browser = 1;

	setup_browser(false);

	/* CPU and PID are mutually exclusive */
	if (top.target_tid > 0 && top.cpu_list) {
		printf("WARNING: PID switch overriding CPU\n");
		sleep(1);
		top.cpu_list = NULL;
	}

	if (top.target_pid != -1)
		top.target_tid = top.target_pid;

	if (perf_evlist__create_maps(top.evlist, top.target_pid,
				     top.target_tid, top.cpu_list) < 0)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		return -ENOMEM;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	/*
	 * User specified count overrides default frequency.
	 */
	if (default_interval)
		top.freq = 0;
	else if (top.freq) {
		default_interval = top.freq;
	} else {
		fprintf(stderr, "frequency and count are zero, aborting\n");
		exit(EXIT_FAILURE);
	}

	list_for_each_entry(pos, &top.evlist->entries, node) {
		if (perf_evsel__alloc_fd(pos, top.evlist->cpus->nr,
					 top.evlist->threads->nr) < 0)
			goto out_free_fd;
		/*
		 * Fill in the ones not specifically initialized via -c:
		 */
		if (pos->attr.sample_period)
			continue;

		pos->attr.sample_period = default_interval;
	}

	if (perf_evlist__alloc_pollfd(top.evlist) < 0 ||
	    perf_evlist__alloc_mmap(top.evlist) < 0)
		goto out_free_fd;

	top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node);

	symbol_conf.priv_size = (sizeof(struct sym_entry) + sizeof(struct annotation) +
				 (top.evlist->nr_entries + 1) * sizeof(unsigned long));

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init() < 0)
		return -1;

	get_term_dimensions(&winsize);
	if (top.print_entries == 0) {
		update_print_entries(&winsize);
		signal(SIGWINCH, sig_winch_handler);
	}

	status = __cmd_top();
out_free_fd:
	perf_evlist__delete(top.evlist);

	return status;
1111
}